[
  {
    "path": ".gitignore",
    "content": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packaging\n.Python\nbuild/\ndevelop-eggs/\ndist/\ndownloads/\neggs/\n.eggs/\nlib/\nlib64/\nparts/\nsdist/\nvar/\nwheels/\n*.egg-info/\n.installed.cfg\n*.egg\nMANIFEST\n\n# PyInstaller\n#  Usually these files are written by a python script from a template\n#  before PyInstaller builds the exe, so as to inject date/other infos into it.\n*.manifest\n*.spec\n\n# Installer logs\npip-log.txt\npip-delete-this-directory.txt\n\n# Unit test / coverage reports\nhtmlcov/\n.tox/\n.coverage\n.coverage.*\n.cache\nnosetests.xml\ncoverage.xml\n*.cover\n.hypothesis/\n.pytest_cache/\n\n# Translations\n*.mo\n*.pot\n\n# Django stuff:\n*.log\nlocal_settings.py\ndb.sqlite3\n\n# Flask stuff:\ninstance/\n.webassets-cache\n\n# Scrapy stuff:\n.scrapy\n\n# Sphinx documentation\ndocs/en/_build/\ndocs/zh_cn/_build/\n\n# PyBuilder\ntarget/\n\n# Jupyter Notebook\n.ipynb_checkpoints\n\n# pyenv\n.python-version\n\n# celery beat schedule file\ncelerybeat-schedule\n\n# SageMath parsed files\n*.sage.py\n\n# Environments\n.env\n.venv\nenv/\nvenv/\nENV/\nenv.bak/\nvenv.bak/\n\n# Spyder project settings\n.spyderproject\n.spyproject\n\n# Rope project settings\n.ropeproject\n\n# mkdocs documentation\n/site\n\n# mypy\n.mypy_cache/\n\ndata/\ndata\n.vscode\n.idea\n.DS_Store\n\n# custom\n*.pkl\n*.pkl.json\n*.log.json\ndocs/modelzoo_statistics.md\nmmdet/.mim\nwork_dirs/\nckpt/\n\n# Pytorch\n*.pth\n*.py~\n*.sh~\n"
  },
  {
    "path": "LICENSE",
    "content": "Copyright 2018-2023 OpenMMLab. All rights reserved.\n\n                                 Apache License\n                           Version 2.0, January 2004\n                        http://www.apache.org/licenses/\n\n   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION\n\n   1. Definitions.\n\n      \"License\" shall mean the terms and conditions for use, reproduction,\n      and distribution as defined by Sections 1 through 9 of this document.\n\n      \"Licensor\" shall mean the copyright owner or entity authorized by\n      the copyright owner that is granting the License.\n\n      \"Legal Entity\" shall mean the union of the acting entity and all\n      other entities that control, are controlled by, or are under common\n      control with that entity. For the purposes of this definition,\n      \"control\" means (i) the power, direct or indirect, to cause the\n      direction or management of such entity, whether by contract or\n      otherwise, or (ii) ownership of fifty percent (50%) or more of the\n      outstanding shares, or (iii) beneficial ownership of such entity.\n\n      \"You\" (or \"Your\") shall mean an individual or Legal Entity\n      exercising permissions granted by this License.\n\n      \"Source\" form shall mean the preferred form for making modifications,\n      including but not limited to software source code, documentation\n      source, and configuration files.\n\n      \"Object\" form shall mean any form resulting from mechanical\n      transformation or translation of a Source form, including but\n      not limited to compiled object code, generated documentation,\n      and conversions to other media types.\n\n      \"Work\" shall mean the work of authorship, whether in Source or\n      Object form, made available under the License, as indicated by a\n      copyright notice that is included in or attached to the work\n      (an example is provided in the Appendix below).\n\n      \"Derivative Works\" shall mean any work, whether in Source or Object\n      form, that is based on (or derived from) the Work and for which the\n      editorial revisions, annotations, elaborations, or other modifications\n      represent, as a whole, an original work of authorship. For the purposes\n      of this License, Derivative Works shall not include works that remain\n      separable from, or merely link (or bind by name) to the interfaces of,\n      the Work and Derivative Works thereof.\n\n      \"Contribution\" shall mean any work of authorship, including\n      the original version of the Work and any modifications or additions\n      to that Work or Derivative Works thereof, that is intentionally\n      submitted to Licensor for inclusion in the Work by the copyright owner\n      or by an individual or Legal Entity authorized to submit on behalf of\n      the copyright owner. 
For the purposes of this definition, \"submitted\"\n      means any form of electronic, verbal, or written communication sent\n      to the Licensor or its representatives, including but not limited to\n      communication on electronic mailing lists, source code control systems,\n      and issue tracking systems that are managed by, or on behalf of, the\n      Licensor for the purpose of discussing and improving the Work, but\n      excluding communication that is conspicuously marked or otherwise\n      designated in writing by the copyright owner as \"Not a Contribution.\"\n\n      \"Contributor\" shall mean Licensor and any individual or Legal Entity\n      on behalf of whom a Contribution has been received by Licensor and\n      subsequently incorporated within the Work.\n\n   2. Grant of Copyright License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      copyright license to reproduce, prepare Derivative Works of,\n      publicly display, publicly perform, sublicense, and distribute the\n      Work and such Derivative Works in Source or Object form.\n\n   3. Grant of Patent License. Subject to the terms and conditions of\n      this License, each Contributor hereby grants to You a perpetual,\n      worldwide, non-exclusive, no-charge, royalty-free, irrevocable\n      (except as stated in this section) patent license to make, have made,\n      use, offer to sell, sell, import, and otherwise transfer the Work,\n      where such license applies only to those patent claims licensable\n      by such Contributor that are necessarily infringed by their\n      Contribution(s) alone or by combination of their Contribution(s)\n      with the Work to which such Contribution(s) was submitted. If You\n      institute patent litigation against any entity (including a\n      cross-claim or counterclaim in a lawsuit) alleging that the Work\n      or a Contribution incorporated within the Work constitutes direct\n      or contributory patent infringement, then any patent licenses\n      granted to You under this License for that Work shall terminate\n      as of the date such litigation is filed.\n\n   4. Redistribution. 
You may reproduce and distribute copies of the\n      Work or Derivative Works thereof in any medium, with or without\n      modifications, and in Source or Object form, provided that You\n      meet the following conditions:\n\n      (a) You must give any other recipients of the Work or\n          Derivative Works a copy of this License; and\n\n      (b) You must cause any modified files to carry prominent notices\n          stating that You changed the files; and\n\n      (c) You must retain, in the Source form of any Derivative Works\n          that You distribute, all copyright, patent, trademark, and\n          attribution notices from the Source form of the Work,\n          excluding those notices that do not pertain to any part of\n          the Derivative Works; and\n\n      (d) If the Work includes a \"NOTICE\" text file as part of its\n          distribution, then any Derivative Works that You distribute must\n          include a readable copy of the attribution notices contained\n          within such NOTICE file, excluding those notices that do not\n          pertain to any part of the Derivative Works, in at least one\n          of the following places: within a NOTICE text file distributed\n          as part of the Derivative Works; within the Source form or\n          documentation, if provided along with the Derivative Works; or,\n          within a display generated by the Derivative Works, if and\n          wherever such third-party notices normally appear. The contents\n          of the NOTICE file are for informational purposes only and\n          do not modify the License. You may add Your own attribution\n          notices within Derivative Works that You distribute, alongside\n          or as an addendum to the NOTICE text from the Work, provided\n          that such additional attribution notices cannot be construed\n          as modifying the License.\n\n      You may add Your own copyright statement to Your modifications and\n      may provide additional or different license terms and conditions\n      for use, reproduction, or distribution of Your modifications, or\n      for any such Derivative Works as a whole, provided Your use,\n      reproduction, and distribution of the Work otherwise complies with\n      the conditions stated in this License.\n\n   5. Submission of Contributions. Unless You explicitly state otherwise,\n      any Contribution intentionally submitted for inclusion in the Work\n      by You to the Licensor shall be under the terms and conditions of\n      this License, without any additional terms or conditions.\n      Notwithstanding the above, nothing herein shall supersede or modify\n      the terms of any separate license agreement you may have executed\n      with Licensor regarding such Contributions.\n\n   6. Trademarks. This License does not grant permission to use the trade\n      names, trademarks, service marks, or product names of the Licensor,\n      except as required for reasonable and customary use in describing the\n      origin of the Work and reproducing the content of the NOTICE file.\n\n   7. Disclaimer of Warranty. 
Unless required by applicable law or\n      agreed to in writing, Licensor provides the Work (and each\n      Contributor provides its Contributions) on an \"AS IS\" BASIS,\n      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n      implied, including, without limitation, any warranties or conditions\n      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A\n      PARTICULAR PURPOSE. You are solely responsible for determining the\n      appropriateness of using or redistributing the Work and assume any\n      risks associated with Your exercise of permissions under this License.\n\n   8. Limitation of Liability. In no event and under no legal theory,\n      whether in tort (including negligence), contract, or otherwise,\n      unless required by applicable law (such as deliberate and grossly\n      negligent acts) or agreed to in writing, shall any Contributor be\n      liable to You for damages, including any direct, indirect, special,\n      incidental, or consequential damages of any character arising as a\n      result of this License or out of the use or inability to use the\n      Work (including but not limited to damages for loss of goodwill,\n      work stoppage, computer failure or malfunction, or any and all\n      other commercial damages or losses), even if such Contributor\n      has been advised of the possibility of such damages.\n\n   9. Accepting Warranty or Additional Liability. While redistributing\n      the Work or Derivative Works thereof, You may choose to offer,\n      and charge a fee for, acceptance of support, warranty, indemnity,\n      or other liability obligations and/or rights consistent with this\n      License. However, in accepting such obligations, You may act only\n      on Your own behalf and on Your sole responsibility, not on behalf\n      of any other Contributor, and only if You agree to indemnify,\n      defend, and hold each Contributor harmless for any liability\n      incurred by, or claims asserted against, such Contributor by reason\n      of your accepting any such warranty or additional liability.\n\n   END OF TERMS AND CONDITIONS\n\n   APPENDIX: How to apply the Apache License to your work.\n\n      To apply the Apache License to your work, attach the following\n      boilerplate notice, with the fields enclosed by brackets \"[]\"\n      replaced with your own identifying information. (Don't include\n      the brackets!)  The text should be enclosed in the appropriate\n      comment syntax for the file format. We also recommend that a\n      file or class name and description of purpose be included on the\n      same \"printed page\" as the copyright notice for easier\n      identification within third-party archives.\n\n   Copyright 2018-2023 OpenMMLab.\n\n   Licensed under the Apache License, Version 2.0 (the \"License\");\n   you may not use this file except in compliance with the License.\n   You may obtain a copy of the License at\n\n       http://www.apache.org/licenses/LICENSE-2.0\n\n   Unless required by applicable law or agreed to in writing, software\n   distributed under the License is distributed on an \"AS IS\" BASIS,\n   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n   See the License for the specific language governing permissions and\n   limitations under the License.\n"
  },
  {
    "path": "README.md",
    "content": "# Prompt-Segment-Anything\nThis is an implementation of zero-shot instance segmentation using [Segment Anything](https://github.com/facebookresearch/segment-anything). Thanks to the authors of Segment Anything for their wonderful work! \n\nThis repository is based on [MMDetection](https://github.com/open-mmlab/mmdetection) and includes some code from [H-Deformable-DETR](https://github.com/HDETR/H-Deformable-DETR) and [FocalNet-DINO](https://github.com/FocalNet/FocalNet-DINO).\n\n![example1](assets/example1.jpg)\n\n## News\n\n**2023.04.12** Multimask output mode and cascade prompt mode are available now.\n\n**2023.04.11** Our [demo](https://huggingface.co/spaces/rockeycoss/Prompt-Segment-Anything-Demo) is available now. Please feel free to check it out.\n\n**2023.04.11** [Swin-L+H-Deformable-DETR + SAM](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py)/[FocalNet-L+DINO + SAM](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py) achieves strong COCO instance segmentation results: mask AP=46.8/49.1 by simply prompting SAM with boxes predicted by Swin-L+H-Deformable-DETR/FocalNet-L+DINO. (mask AP=46.5 based on ViTDet)🍺\n\n## Catalog\n\n- [x] Support Swin-L+H-Deformable-DETR+SAM\n- [x] Support FocalNet-L+DINO+SAM\n- [x] Support R50+H-Deformable-DETR+SAM/Swin-T+H-Deformable-DETR\n- [x] Support HuggingFace gradio demo\n- [x] Support cascade prompts (box prompt + mask prompt)\n\n## Box-as-Prompt Results\n\n|         Detector         |    SAM    |    multimask output    | Detector's Box AP | Mask AP |                            Config                            |\n| :--------------------- | :-------: | :---------------: | :-----: | :----------------------------------------------------------: | ----------------------- |\n|  R50+H-Deformable-DETR   | sam-vit-b | :x: |       50.0        |  38.2   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b.py) |\n| R50+H-Deformable-DETR | sam-vit-b | :heavy_check_mark: | 50.0 | 39.9 | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi.py) |\n|  R50+H-Deformable-DETR   | sam-vit-l | :x: |       50.0        |  41.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-l.py) |\n| Swin-T+H-Deformable-DETR | sam-vit-b | :x: |       53.2        |  40.0   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py) |\n| Swin-T+H-Deformable-DETR | sam-vit-l | :x: |       53.2        |  43.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-t-hdetr_sam-vit-l.py) |\n| Swin-L+H-Deformable-DETR | sam-vit-b | :x: |       58.0        |  42.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py) |\n| Swin-L+H-Deformable-DETR | sam-vit-l | :x: |       58.0        |  46.3   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py) |\n| Swin-L+H-Deformable-DETR | sam-vit-h | :x: |       58.0        |  46.8   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py) |\n|     
FocalNet-L+DINO      | sam-vit-b | :x: |       63.2        |  44.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py) |\n|     FocalNet-L+DINO      | sam-vit-l | :x: |       63.2        |  48.6   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py) |\n|     FocalNet-L+DINO      | sam-vit-h | :x: |       63.2        |  49.1   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py) |\n\n## Cascade-Prompt Results\n\n|       Detector        |    SAM    |  multimask output   | Detector's Box AP | Mask AP | Config                                                       |\n| :------------------- | :-------: | :----------------: | :---------------: | :-----: | ------------------------------------------------------------ |\n| R50+H-Deformable-DETR | sam-vit-b |        :x:         |       50.0        |  38.8   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_cascade.py) |\n| R50+H-Deformable-DETR | sam-vit-b | :heavy_check_mark: |       50.0        |  40.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi_cascade.py) |\n| Swin-L+H-Deformable-DETR | sam-vit-h | :heavy_check_mark: |       58.0        |  47.3   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py) |\n|     FocalNet-L+DINO      | sam-vit-h | :heavy_check_mark: |       63.2        |  49.6   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py) |\n\n***Note***\n\n**multimask output**: If multimask output is :heavy_check_mark:, SAM will predict three masks for each prompt, and the segmentation result will be the one with the highest predicted IoU. Otherwise, if multimask output is :x:, SAM will return only one mask for each prompt, which will be used as the segmentation result.\n\n**cascade-prompt**: In the cascade-prompt setting, the segmentation process involves two stages. In the first stage, a coarse mask is predicted with a bounding box prompt. The second stage then utilizes both the bounding box and the coarse mask as prompts to predict the final segmentation result. Note that if multimask output is :heavy_check_mark:, the first stage will predict three coarse masks, and the second stage will use the mask with the highest predicted IoU as the prompt.\n\n## Installation\n\n🍺🍺🍺 Add Docker Hub environment\n\n```\ndocker pull kxqt/prompt-sam-torch1.12-cuda11.6:20230410\nnvidia-docker run -it --shm-size=4096m -v {your_path}:{path_in_docker} kxqt/prompt-sam-torch1.12-cuda11.6:20230410\n```\n\nWe test the models under `python=3.7.10, pytorch=1.10.2, cuda=10.2`. Other versions may work as well.\n\n1. Clone this repository\n\n```\ngit clone https://github.com/RockeyCoss/Instance-Segment-Anything\ncd Instance-Segment-Anything\n```\n\n2. Install PyTorch\n\n```bash\n# an example\npip install torch torchvision\n```\n\n3. Install MMCV\n\n```\npip install -U openmim\nmim install \"mmcv-full<2.0.0\"\n```\n\n4. Install MMDetection's requirements\n\n```\npip install -r requirements.txt\n```\n\n5. 
Compile CUDA operators\n\n```bash\ncd projects/instance_segment_anything/ops\npython setup.py build install\ncd ../../..\n```\n\nPlease note that the ``mmdet`` package does not need to be installed. If your environment already has the ``mmdet`` package installed, you can run the following command before executing other scripts:\n\n```bash\nexport PYTHONPATH=$(pwd)\n```\n\n## Prepare COCO Dataset\n\nPlease refer to [data preparation](https://mmdetection.readthedocs.io/en/latest/user_guides/dataset_prepare.html).\n\n## Prepare Checkpoints\n\n1. Install wget\n\n```\npip install wget\n```\n\n2. SAM checkpoints\n\n```bash\nmkdir ckpt\ncd ckpt\npython -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth\npython -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth\npython -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth\ncd ..\n```\n\n3. Here are the checkpoints for the detection models. You can download only the checkpoints you need.\n\n```bash\n# R50+H-Deformable-DETR\ncd ckpt\npython -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/r50_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o r50_hdetr.pth\ncd ..\npython tools/convert_ckpt.py ckpt/r50_hdetr.pth ckpt/r50_hdetr.pth\n\n# Swin-T+H-Deformable-DETR\ncd ckpt\npython -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/swin_tiny_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o swin_t_hdetr.pth\ncd ..\npython tools/convert_ckpt.py ckpt/swin_t_hdetr.pth ckpt/swin_t_hdetr.pth\n\n# Swin-L+H-Deformable-DETR\ncd ckpt\npython -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/decay0.05_drop_path0.5_swin_large_hybrid_branch_lambda1_group6_t1500_n900_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o swin_l_hdetr.pth\ncd ..\npython tools/convert_ckpt.py ckpt/swin_l_hdetr.pth ckpt/swin_l_hdetr.pth\n\n# FocalNet-L+DINO\ncd ckpt\npython -m wget https://projects4jw.blob.core.windows.net/focalnet/release/detection/focalnet_large_fl4_o365_finetuned_on_coco.pth -o focalnet_l_dino.pth\ncd ..\npython tools/convert_ckpt.py ckpt/focalnet_l_dino.pth ckpt/focalnet_l_dino.pth\n```\n\n## Run Evaluation\n\n1. Evaluate Metrics\n\n```bash\n# single GPU\npython tools/test.py path/to/the/config/file --eval segm\n# multiple GPUs\nbash tools/dist_test.sh path/to/the/config/file num_gpus --eval segm\n```\n\n2. Visualize Segmentation Results\n\n```bash\npython tools/test.py path/to/the/config/file --show-dir path/to/the/visualization/results\n```\n## Gradio Demo\n\nWe also provide a UI for displaying the segmentation results that is built with gradio. 
To launch the demo, simply run the following command in a terminal:\n\n```bash\npip install gradio\npython app.py\n```\n\nThis demo is also hosted on HuggingFace [here](https://huggingface.co/spaces/rockeycoss/Prompt-Segment-Anything-Demo).\n\n## More Segmentation Examples\n\n![example2](assets/example2.jpg)\n![example3](assets/example3.jpg)\n![example4](assets/example4.jpg)\n![example5](assets/example5.jpg)\n\n## Citation\n\n**Segment Anything**\n\n```latex\n@article{kirillov2023segany,\n  title={Segment Anything}, \n  author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\\'a}r, Piotr and Girshick, Ross},\n  journal={arXiv:2304.02643},\n  year={2023}\n}\n```\n**H-Deformable-DETR**\n\n```latex\n@article{jia2022detrs,\n  title={DETRs with Hybrid Matching},\n  author={Jia, Ding and Yuan, Yuhui and He, Haodi and Wu, Xiaopei and Yu, Haojun and Lin, Weihong and Sun, Lei and Zhang, Chao and Hu, Han},\n  journal={arXiv preprint arXiv:2207.13080},\n  year={2022}\n}\n```\n**Swin Transformer**\n\n```latex\n@inproceedings{liu2021Swin,\n  title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},\n  author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},\n  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},\n  year={2021}\n}\n```\n**DINO**\n\n```latex\n@misc{zhang2022dino,\n      title={DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection}, \n      author={Hao Zhang and Feng Li and Shilong Liu and Lei Zhang and Hang Su and Jun Zhu and Lionel M. Ni and Heung-Yeung Shum},\n      year={2022},\n      eprint={2203.03605},\n      archivePrefix={arXiv},\n      primaryClass={cs.CV}\n}\n```\n**FocalNet**\n\n```latex\n@misc{yang2022focalnet,  \n  author = {Yang, Jianwei and Li, Chunyuan and Dai, Xiyang and Yuan, Lu and Gao, Jianfeng},\n  title = {Focal Modulation Networks},\n  publisher = {arXiv},\n  year = {2022},\n}\n```\n"
  },
  {
    "path": "app.py",
    "content": "import os\n\nSPACE_ID = os.getenv('SPACE_ID')\nif SPACE_ID is not None:\n    # running on huggingface space\n    os.system(r'mkdir ckpt')\n    os.system(\n        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth -o ckpt/sam_vit_b_01ec64.pth')\n    os.system(\n        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth -o ckpt/sam_vit_l_0b3195.pth')\n    os.system(\n        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth -o ckpt/sam_vit_h_4b8939.pth')\n\n    os.system(\n        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1'\n        r'/r50_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/r50_hdetr.pth')\n    os.system(\n        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1'\n        r'/swin_tiny_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/swin_t_hdetr.pth')\n    os.system(\n        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/decay0.05_drop_path0'\n        r'.5_swin_large_hybrid_branch_lambda1_group6_t1500_n900_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/swin_l_hdetr.pth')\n    os.system(r'python -m wget https://projects4jw.blob.core.windows.net/focalnet/release/detection'\n              r'/focalnet_large_fl4_o365_finetuned_on_coco.pth -o ckpt/focalnet_l_dino.pth')\n\n    os.system(r'python tools/convert_ckpt.py ckpt/r50_hdetr.pth ckpt/r50_hdetr.pth')\n    os.system(r'python tools/convert_ckpt.py ckpt/swin_t_hdetr.pth ckpt/swin_t_hdetr.pth')\n    os.system(r'python tools/convert_ckpt.py ckpt/swin_l_hdetr.pth ckpt/swin_l_hdetr.pth')\n    os.system(r'python tools/convert_ckpt.py ckpt/focalnet_l_dino.pth ckpt/focalnet_l_dino.pth')\nimport warnings\nfrom collections import OrderedDict\nfrom pathlib import Path\n\nimport gradio as gr\nimport numpy as np\nimport torch\n\nimport mmcv\nfrom mmcv import Config\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\nfrom mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE\n\nfrom mmdet.core import get_classes\nfrom mmdet.datasets import (CocoDataset, replace_ImageToTensor)\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.models import build_detector\nfrom mmdet.utils import (compat_cfg, replace_cfg_vals, setup_multi_processes,\n                         update_data_root)\n\nconfig_dict = OrderedDict([('r50-hdetr_sam-vit-b', 'projects/configs/hdetr/r50-hdetr_sam-vit-b.py'),\n                           ('r50-hdetr_sam-vit-l', 'projects/configs/hdetr/r50-hdetr_sam-vit-l.py'),\n                           ('swin-t-hdetr_sam-vit-b', 'projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py'),\n                           ('swin-t-hdetr_sam-vit-l', 'projects/configs/hdetr/swin-t-hdetr_sam-vit-l.py'),\n                           ('swin-l-hdetr_sam-vit-b', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py'),\n                           ('swin-l-hdetr_sam-vit-l', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py'),\n                           # ('swin-l-hdetr_sam-vit-h', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py'),\n                           ('focalnet-l-dino_sam-vit-b', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-b.py'),\n     
                      # ('focalnet-l-dino_sam-vit-l', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-l.py'),\n                           # ('focalnet-l-dino_sam-vit-h', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-h.py')\n                           ])\n\n\ndef init_demo_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):\n    \"\"\"Initialize a detector from config file.\n    Args:\n        config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,\n            :obj:`Path`, or the config object.\n        checkpoint (str, optional): Checkpoint path. If left as None, the model\n            will not load any weights.\n        cfg_options (dict): Options to override some settings in the used\n            config.\n    Returns:\n        nn.Module: The constructed detector.\n    \"\"\"\n    if isinstance(config, (str, Path)):\n        config = mmcv.Config.fromfile(config)\n    elif not isinstance(config, mmcv.Config):\n        raise TypeError('config must be a filename or Config object, '\n                        f'but got {type(config)}')\n    if cfg_options is not None:\n        config.merge_from_dict(cfg_options)\n    if 'pretrained' in config.model:\n        config.model.pretrained = None\n    elif (config.model.get('backbone', None) is not None\n          and 'init_cfg' in config.model.backbone):\n        config.model.backbone.init_cfg = None\n    config.model.train_cfg = None\n    model = build_detector(config.model, test_cfg=config.get('test_cfg'))\n    if checkpoint is not None:\n        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n        if 'CLASSES' in checkpoint.get('meta', {}):\n            model.CLASSES = checkpoint['meta']['CLASSES']\n        else:\n            warnings.simplefilter('once')\n            warnings.warn('Class names are not saved in the checkpoint\\'s '\n                          'meta data, use COCO classes by default.')\n            model.CLASSES = get_classes('coco')\n    model.cfg = config  # save the config in the model for convenience\n    model.to(device)\n    model.eval()\n\n    if device == 'npu':\n        from mmcv.device.npu import NPUDataParallel\n        model = NPUDataParallel(model)\n        model.cfg = config\n\n    return model\n\n\ndef inference_demo_detector(model, imgs):\n    \"\"\"Inference image(s) with the detector.\n    Args:\n        model (nn.Module): The loaded detector.\n        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):\n           Either image files or loaded images.\n    Returns:\n        If imgs is a list or tuple, the same length list type results\n        will be returned, otherwise return the detection results directly.\n    \"\"\"\n    ori_img = imgs\n    if isinstance(imgs, (list, tuple)):\n        is_batch = True\n    else:\n        imgs = [imgs]\n        is_batch = False\n\n    cfg = model.cfg\n    device = next(model.parameters()).device  # model device\n\n    if isinstance(imgs[0], np.ndarray):\n        cfg = cfg.copy()\n        # set loading pipeline type\n        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n\n    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n    test_pipeline = Compose(cfg.data.test.pipeline)\n\n    datas = []\n    for img in imgs:\n        # prepare data\n        if isinstance(img, np.ndarray):\n            # directly add img\n            data = dict(img=img)\n        else:\n            # add information into dict\n            data = dict(img_info=dict(filename=img), 
img_prefix=None)\n        # build the data pipeline\n        data = test_pipeline(data)\n        datas.append(data)\n\n    data = collate(datas, samples_per_gpu=len(imgs))\n    # just get the actual data from DataContainer\n    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]\n    data['img'] = [img.data[0] for img in data['img']]\n    if next(model.parameters()).is_cuda:\n        # scatter to specified GPU\n        data = scatter(data, [device])[0]\n    else:\n        for m in model.modules():\n            assert not isinstance(\n                m, RoIPool\n            ), 'CPU inference with RoIPool is not supported currently.'\n\n    # forward the model\n    with torch.no_grad():\n        results = model(return_loss=False, rescale=True, **data, ori_img=ori_img)\n\n    if not is_batch:\n        return results[0]\n    else:\n        return results\n\n\ndef inference(img, config):\n    if img is None:\n        return None\n    print(f\"config: {config}\")\n    config = config_dict[config]\n    cfg = Config.fromfile(config)\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    cfg = compat_cfg(cfg)\n\n    # set multi-process settings\n    setup_multi_processes(cfg)\n\n    # import modules from plugin/xx, registry will be updated\n    if hasattr(cfg, 'plugin'):\n        if cfg.plugin:\n            import importlib\n            if hasattr(cfg, 'plugin_dir'):\n                plugin_dir = cfg.plugin_dir\n                _module_dir = os.path.dirname(plugin_dir)\n                _module_dir = _module_dir.split('/')\n                _module_path = _module_dir[0]\n\n                for m in _module_dir[1:]:\n                    _module_path = _module_path + '.' + m\n                print(_module_path)\n                plg_lib = importlib.import_module(_module_path)\n            else:\n                # import dir is the dirpath for the config file\n                _module_dir = os.path.dirname(config)\n                _module_dir = _module_dir.split('/')\n                _module_path = _module_dir[0]\n                for m in _module_dir[1:]:\n                    _module_path = _module_path + '.' 
+ m\n                # print(_module_path)\n                plg_lib = importlib.import_module(_module_path)\n\n    # set cudnn_benchmark\n    if cfg.get('cudnn_benchmark', False):\n        torch.backends.cudnn.benchmark = True\n    if IS_CUDA_AVAILABLE or IS_MLU_AVAILABLE:\n        device = \"cuda\"\n    else:\n        device = \"cpu\"\n    model = init_demo_detector(cfg, None, device=device)\n    model.CLASSES = CocoDataset.CLASSES\n\n    results = inference_demo_detector(model, img)\n    visualize = model.show_result(\n        img,\n        results,\n        bbox_color=CocoDataset.PALETTE,\n        text_color=CocoDataset.PALETTE,\n        mask_color=CocoDataset.PALETTE,\n        show=False,\n        out_file=None,\n        score_thr=0.3\n    )\n    del model\n    return visualize\n\n\ndescription = \"\"\"\n#  <center>Prompt Segment Anything (zero-shot instance segmentation demo)</center>\nGithub link: [Link](https://github.com/RockeyCoss/Prompt-Segment-Anything)\nYou can select the model you want to use from the \"Model\" dropdown menu and click \"Submit\" to segment the image you uploaded to the \"Input Image\" box.\n\"\"\"\nif SPACE_ID is not None:\n    description += f'\\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href=\"https://huggingface.co/spaces/{SPACE_ID}?duplicate=true\"><img style=\"display: inline; margin-top: 0em; margin-bottom: 0em\" src=\"https://bit.ly/3gLdBN6\" alt=\"Duplicate Space\" /></a></p>'\n\n\ndef main():\n    with gr.Blocks() as demo:\n        gr.Markdown(description)\n        with gr.Column():\n            with gr.Row():\n                with gr.Column():\n                    input_img = gr.Image(type=\"numpy\", label=\"Input Image\")\n                    model_type = gr.Dropdown(choices=list(config_dict.keys()),\n                                             value=list(config_dict.keys())[0],\n                                             label='Model',\n                                             multiselect=False)\n                    with gr.Row():\n                        clear_btn = gr.Button(value=\"Clear\")\n                        submit_btn = gr.Button(value=\"Submit\")\n                output_img = gr.Image(type=\"numpy\", label=\"Output\")\n            gr.Examples(\n                examples=[[\"./assets/img1.jpg\", \"r50-hdetr_sam-vit-b\"],\n                          [\"./assets/img2.jpg\", \"r50-hdetr_sam-vit-b\"],\n                          [\"./assets/img3.jpg\", \"r50-hdetr_sam-vit-b\"],\n                          [\"./assets/img4.jpg\", \"r50-hdetr_sam-vit-b\"]],\n                inputs=[input_img, model_type],\n                outputs=output_img,\n                fn=inference\n            )\n\n        submit_btn.click(inference,\n                         inputs=[input_img, model_type],\n                         outputs=output_img)\n        clear_btn.click(lambda: [None, None], None, [input_img, output_img], queue=False)\n\n    demo.queue()\n    demo.launch()\n\n\nif __name__ == '__main__':\n    main()\n"
  },
  {
    "path": "mmdet/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\n\nfrom .version import __version__, short_version\n\n\ndef digit_version(version_str):\n    digit_version = []\n    for x in version_str.split('.'):\n        if x.isdigit():\n            digit_version.append(int(x))\n        elif x.find('rc') != -1:\n            patch_version = x.split('rc')\n            digit_version.append(int(patch_version[0]) - 1)\n            digit_version.append(int(patch_version[1]))\n    return digit_version\n\n\nmmcv_minimum_version = '1.3.17'\nmmcv_maximum_version = '1.8.0'\nmmcv_version = digit_version(mmcv.__version__)\n\n\nassert (mmcv_version >= digit_version(mmcv_minimum_version)\n        and mmcv_version <= digit_version(mmcv_maximum_version)), \\\n    f'MMCV=={mmcv.__version__} is used but incompatible. ' \\\n    f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'\n\n__all__ = ['__version__', 'short_version']\n"
  },
  {
    "path": "mmdet/apis/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .inference import (async_inference_detector, inference_detector,\n                        init_detector, show_result_pyplot)\nfrom .test import multi_gpu_test, single_gpu_test\nfrom .train import (get_root_logger, init_random_seed, set_random_seed,\n                    train_detector)\n\n__all__ = [\n    'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',\n    'async_inference_detector', 'inference_detector', 'show_result_pyplot',\n    'multi_gpu_test', 'single_gpu_test', 'init_random_seed'\n]\n"
  },
  {
    "path": "mmdet/apis/inference.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom pathlib import Path\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.ops import RoIPool\nfrom mmcv.parallel import collate, scatter\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.core import get_classes\nfrom mmdet.datasets import replace_ImageToTensor\nfrom mmdet.datasets.pipelines import Compose\nfrom mmdet.models import build_detector\n\n\ndef init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):\n    \"\"\"Initialize a detector from config file.\n\n    Args:\n        config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,\n            :obj:`Path`, or the config object.\n        checkpoint (str, optional): Checkpoint path. If left as None, the model\n            will not load any weights.\n        cfg_options (dict): Options to override some settings in the used\n            config.\n\n    Returns:\n        nn.Module: The constructed detector.\n    \"\"\"\n    if isinstance(config, (str, Path)):\n        config = mmcv.Config.fromfile(config)\n    elif not isinstance(config, mmcv.Config):\n        raise TypeError('config must be a filename or Config object, '\n                        f'but got {type(config)}')\n    if cfg_options is not None:\n        config.merge_from_dict(cfg_options)\n    if 'pretrained' in config.model:\n        config.model.pretrained = None\n    elif 'init_cfg' in config.model.backbone:\n        config.model.backbone.init_cfg = None\n    config.model.train_cfg = None\n    model = build_detector(config.model, test_cfg=config.get('test_cfg'))\n    if checkpoint is not None:\n        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')\n        if 'CLASSES' in checkpoint.get('meta', {}):\n            model.CLASSES = checkpoint['meta']['CLASSES']\n        else:\n            warnings.simplefilter('once')\n            warnings.warn('Class names are not saved in the checkpoint\\'s '\n                          'meta data, use COCO classes by default.')\n            model.CLASSES = get_classes('coco')\n    model.cfg = config  # save the config in the model for convenience\n    model.to(device)\n    model.eval()\n\n    if device == 'npu':\n        from mmcv.device.npu import NPUDataParallel\n        model = NPUDataParallel(model)\n        model.cfg = config\n\n    return model\n\n\nclass LoadImage:\n    \"\"\"Deprecated.\n\n    A simple pipeline to load image.\n    \"\"\"\n\n    def __call__(self, results):\n        \"\"\"Call function to load images into results.\n\n        Args:\n            results (dict): A result dict contains the file name\n                of the image to be read.\n        Returns:\n            dict: ``results`` will be returned containing loaded image.\n        \"\"\"\n        warnings.simplefilter('once')\n        warnings.warn('`LoadImage` is deprecated and will be removed in '\n                      'future releases. 
You may use `LoadImageFromWebcam` '\n                      'from `mmdet.datasets.pipelines.` instead.')\n        if isinstance(results['img'], str):\n            results['filename'] = results['img']\n            results['ori_filename'] = results['img']\n        else:\n            results['filename'] = None\n            results['ori_filename'] = None\n        img = mmcv.imread(results['img'])\n        results['img'] = img\n        results['img_fields'] = ['img']\n        results['img_shape'] = img.shape\n        results['ori_shape'] = img.shape\n        return results\n\n\ndef inference_detector(model, imgs):\n    \"\"\"Inference image(s) with the detector.\n\n    Args:\n        model (nn.Module): The loaded detector.\n        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):\n           Either image files or loaded images.\n\n    Returns:\n        If imgs is a list or tuple, the same length list type results\n        will be returned, otherwise return the detection results directly.\n    \"\"\"\n\n    if isinstance(imgs, (list, tuple)):\n        is_batch = True\n    else:\n        imgs = [imgs]\n        is_batch = False\n\n    cfg = model.cfg\n    device = next(model.parameters()).device  # model device\n\n    if isinstance(imgs[0], np.ndarray):\n        cfg = cfg.copy()\n        # set loading pipeline type\n        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n\n    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n    test_pipeline = Compose(cfg.data.test.pipeline)\n\n    datas = []\n    for img in imgs:\n        # prepare data\n        if isinstance(img, np.ndarray):\n            # directly add img\n            data = dict(img=img)\n        else:\n            # add information into dict\n            data = dict(img_info=dict(filename=img), img_prefix=None)\n        # build the data pipeline\n        data = test_pipeline(data)\n        datas.append(data)\n\n    data = collate(datas, samples_per_gpu=len(imgs))\n    # just get the actual data from DataContainer\n    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]\n    data['img'] = [img.data[0] for img in data['img']]\n    if next(model.parameters()).is_cuda:\n        # scatter to specified GPU\n        data = scatter(data, [device])[0]\n    else:\n        for m in model.modules():\n            assert not isinstance(\n                m, RoIPool\n            ), 'CPU inference with RoIPool is not supported currently.'\n\n    # forward the model\n    with torch.no_grad():\n        results = model(return_loss=False, rescale=True, **data)\n\n    if not is_batch:\n        return results[0]\n    else:\n        return results\n\n\nasync def async_inference_detector(model, imgs):\n    \"\"\"Async inference image(s) with the detector.\n\n    Args:\n        model (nn.Module): The loaded detector.\n        img (str | ndarray): Either image files or loaded images.\n\n    Returns:\n        Awaitable detection results.\n    \"\"\"\n    if not isinstance(imgs, (list, tuple)):\n        imgs = [imgs]\n\n    cfg = model.cfg\n    device = next(model.parameters()).device  # model device\n\n    if isinstance(imgs[0], np.ndarray):\n        cfg = cfg.copy()\n        # set loading pipeline type\n        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'\n\n    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)\n    test_pipeline = Compose(cfg.data.test.pipeline)\n\n    datas = []\n    for img in imgs:\n        # prepare data\n        if isinstance(img, np.ndarray):\n    
        # directly add img\n            data = dict(img=img)\n        else:\n            # add information into dict\n            data = dict(img_info=dict(filename=img), img_prefix=None)\n        # build the data pipeline\n        data = test_pipeline(data)\n        datas.append(data)\n\n    data = collate(datas, samples_per_gpu=len(imgs))\n    # just get the actual data from DataContainer\n    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]\n    data['img'] = [img.data[0] for img in data['img']]\n    if next(model.parameters()).is_cuda:\n        # scatter to specified GPU\n        data = scatter(data, [device])[0]\n    else:\n        for m in model.modules():\n            assert not isinstance(\n                m, RoIPool\n            ), 'CPU inference with RoIPool is not supported currently.'\n\n    # We don't restore `torch.is_grad_enabled()` value during concurrent\n    # inference since execution can overlap\n    torch.set_grad_enabled(False)\n    results = await model.aforward_test(rescale=True, **data)\n    return results\n\n\ndef show_result_pyplot(model,\n                       img,\n                       result,\n                       score_thr=0.3,\n                       title='result',\n                       wait_time=0,\n                       palette=None,\n                       out_file=None):\n    \"\"\"Visualize the detection results on the image.\n\n    Args:\n        model (nn.Module): The loaded detector.\n        img (str or np.ndarray): Image filename or loaded image.\n        result (tuple[list] or list): The detection result, can be either\n            (bbox, segm) or just bbox.\n        score_thr (float): The threshold to visualize the bboxes and masks.\n        title (str): Title of the pyplot figure.\n        wait_time (float): Value of waitKey param. Default: 0.\n        palette (str or tuple(int) or :obj:`Color`): Color.\n            The tuple of color should be in BGR order.\n        out_file (str or None): The path to write the image.\n            Default: None.\n    \"\"\"\n    if hasattr(model, 'module'):\n        model = model.module\n    model.show_result(\n        img,\n        result,\n        score_thr=score_thr,\n        show=True,\n        wait_time=wait_time,\n        win_name=title,\n        bbox_color=palette,\n        text_color=(200, 200, 200),\n        mask_color=palette,\n        out_file=out_file)\n"
  },
  {
    "path": "mmdet/apis/test.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport pickle\nimport shutil\nimport tempfile\nimport time\n\nimport mmcv\nimport torch\nimport torch.distributed as dist\nfrom mmcv.image import tensor2imgs\nfrom mmcv.runner import get_dist_info\n\nfrom mmdet.core import encode_mask_results\n\n\ndef single_gpu_test(model,\n                    data_loader,\n                    show=False,\n                    out_dir=None,\n                    show_score_thr=0.3):\n    model.eval()\n    results = []\n    dataset = data_loader.dataset\n    PALETTE = getattr(dataset, 'PALETTE', None)\n    prog_bar = mmcv.ProgressBar(len(dataset))\n    for i, data in enumerate(data_loader):\n        with torch.no_grad():\n            result = model(return_loss=False, rescale=True, **data)\n\n        batch_size = len(result)\n        if show or out_dir:\n            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):\n                img_tensor = data['img'][0]\n            else:\n                img_tensor = data['img'][0].data[0]\n            img_metas = data['img_metas'][0].data[0]\n            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])\n            assert len(imgs) == len(img_metas)\n\n            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):\n                h, w, _ = img_meta['img_shape']\n                img_show = img[:h, :w, :]\n\n                ori_h, ori_w = img_meta['ori_shape'][:-1]\n                img_show = mmcv.imresize(img_show, (ori_w, ori_h))\n\n                if out_dir:\n                    out_file = osp.join(out_dir, img_meta['ori_filename'])\n                else:\n                    out_file = None\n\n                model.module.show_result(\n                    img_show,\n                    result[i],\n                    bbox_color=PALETTE,\n                    text_color=PALETTE,\n                    mask_color=PALETTE,\n                    show=show,\n                    out_file=out_file,\n                    score_thr=show_score_thr)\n\n        # encode mask results\n        if isinstance(result[0], tuple):\n            result = [(bbox_results, encode_mask_results(mask_results))\n                      for bbox_results, mask_results in result]\n        # This logic is only used in panoptic segmentation test.\n        elif isinstance(result[0], dict) and 'ins_results' in result[0]:\n            for j in range(len(result)):\n                bbox_results, mask_results = result[j]['ins_results']\n                result[j]['ins_results'] = (bbox_results,\n                                            encode_mask_results(mask_results))\n\n        results.extend(result)\n\n        for _ in range(batch_size):\n            prog_bar.update()\n    return results\n\n\ndef multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):\n    \"\"\"Test model with multiple gpus.\n\n    This method tests model with multiple gpus and collects the results\n    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'\n    it encodes results to gpu tensors and use gpu communication for results\n    collection. 
On cpu mode it saves the results on different gpus to 'tmpdir'\n    and collects them by the rank 0 worker.\n\n    Args:\n        model (nn.Module): Model to be tested.\n        data_loader (nn.Dataloader): Pytorch data loader.\n        tmpdir (str): Path of directory to save the temporary results from\n            different gpus under cpu mode.\n        gpu_collect (bool): Option to use either gpu or cpu to collect results.\n\n    Returns:\n        list: The prediction results.\n    \"\"\"\n    model.eval()\n    results = []\n    dataset = data_loader.dataset\n    rank, world_size = get_dist_info()\n    if rank == 0:\n        prog_bar = mmcv.ProgressBar(len(dataset))\n    time.sleep(2)  # This line can prevent deadlock problem in some cases.\n    for i, data in enumerate(data_loader):\n        with torch.no_grad():\n            result = model(return_loss=False, rescale=True, **data)\n            # encode mask results\n            if isinstance(result[0], tuple):\n                result = [(bbox_results, encode_mask_results(mask_results))\n                          for bbox_results, mask_results in result]\n            # This logic is only used in panoptic segmentation test.\n            elif isinstance(result[0], dict) and 'ins_results' in result[0]:\n                for j in range(len(result)):\n                    bbox_results, mask_results = result[j]['ins_results']\n                    result[j]['ins_results'] = (\n                        bbox_results, encode_mask_results(mask_results))\n\n        results.extend(result)\n\n        if rank == 0:\n            batch_size = len(result)\n            for _ in range(batch_size * world_size):\n                prog_bar.update()\n\n    # collect results from all ranks\n    if gpu_collect:\n        results = collect_results_gpu(results, len(dataset))\n    else:\n        results = collect_results_cpu(results, len(dataset), tmpdir)\n    return results\n\n\ndef collect_results_cpu(result_part, size, tmpdir=None):\n    rank, world_size = get_dist_info()\n    # create a tmp dir if it is not specified\n    if tmpdir is None:\n        MAX_LEN = 512\n        # 32 is whitespace\n        dir_tensor = torch.full((MAX_LEN, ),\n                                32,\n                                dtype=torch.uint8,\n                                device='cuda')\n        if rank == 0:\n            mmcv.mkdir_or_exist('.dist_test')\n            tmpdir = tempfile.mkdtemp(dir='.dist_test')\n            tmpdir = torch.tensor(\n                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')\n            dir_tensor[:len(tmpdir)] = tmpdir\n        dist.broadcast(dir_tensor, 0)\n        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()\n    else:\n        mmcv.mkdir_or_exist(tmpdir)\n    # dump the part result to the dir\n    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))\n    dist.barrier()\n    # collect all parts\n    if rank != 0:\n        return None\n    else:\n        # load results of all parts from tmp dir\n        part_list = []\n        for i in range(world_size):\n            part_file = osp.join(tmpdir, f'part_{i}.pkl')\n            part_list.append(mmcv.load(part_file))\n        # sort the results\n        ordered_results = []\n        for res in zip(*part_list):\n            ordered_results.extend(list(res))\n        # the dataloader may pad some samples\n        ordered_results = ordered_results[:size]\n        # remove tmp dir\n        shutil.rmtree(tmpdir)\n        return ordered_results\n\n\ndef 
collect_results_gpu(result_part, size):\n    rank, world_size = get_dist_info()\n    # dump result part to tensor with pickle\n    part_tensor = torch.tensor(\n        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')\n    # gather all result part tensor shape\n    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')\n    shape_list = [shape_tensor.clone() for _ in range(world_size)]\n    dist.all_gather(shape_list, shape_tensor)\n    # padding result part tensor to max length\n    shape_max = torch.tensor(shape_list).max()\n    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')\n    part_send[:shape_tensor[0]] = part_tensor\n    part_recv_list = [\n        part_tensor.new_zeros(shape_max) for _ in range(world_size)\n    ]\n    # gather all result part\n    dist.all_gather(part_recv_list, part_send)\n\n    if rank == 0:\n        part_list = []\n        for recv, shape in zip(part_recv_list, shape_list):\n            part_list.append(\n                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))\n        # sort the results\n        ordered_results = []\n        for res in zip(*part_list):\n            ordered_results.extend(list(res))\n        # the dataloader may pad some samples\n        ordered_results = ordered_results[:size]\n        return ordered_results\n"
  },
  {
    "path": "mmdet/apis/train.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,\n                         Fp16OptimizerHook, OptimizerHook, build_runner,\n                         get_dist_info)\n\nfrom mmdet.core import DistEvalHook, EvalHook, build_optimizer\nfrom mmdet.datasets import (build_dataloader, build_dataset,\n                            replace_ImageToTensor)\nfrom mmdet.utils import (build_ddp, build_dp, compat_cfg,\n                         find_latest_checkpoint, get_root_logger)\n\n\ndef init_random_seed(seed=None, device='cuda'):\n    \"\"\"Initialize random seed.\n\n    If the seed is not set, the seed will be automatically randomized,\n    and then broadcast to all processes to prevent some potential bugs.\n\n    Args:\n        seed (int, Optional): The seed. Default to None.\n        device (str): The device where the seed will be put on.\n            Default to 'cuda'.\n\n    Returns:\n        int: Seed to be used.\n    \"\"\"\n    if seed is not None:\n        return seed\n\n    # Make sure all ranks share the same random seed to prevent\n    # some potential bugs. Please refer to\n    # https://github.com/open-mmlab/mmdetection/issues/6339\n    rank, world_size = get_dist_info()\n    seed = np.random.randint(2**31)\n    if world_size == 1:\n        return seed\n\n    if rank == 0:\n        random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n    else:\n        random_num = torch.tensor(0, dtype=torch.int32, device=device)\n    dist.broadcast(random_num, src=0)\n    return random_num.item()\n\n\ndef set_random_seed(seed, deterministic=False):\n    \"\"\"Set random seed.\n\n    Args:\n        seed (int): Seed to be used.\n        deterministic (bool): Whether to set the deterministic option for\n            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n            to True and `torch.backends.cudnn.benchmark` to False.\n            Default: False.\n    \"\"\"\n    random.seed(seed)\n    np.random.seed(seed)\n    torch.manual_seed(seed)\n    torch.cuda.manual_seed_all(seed)\n    if deterministic:\n        torch.backends.cudnn.deterministic = True\n        torch.backends.cudnn.benchmark = False\n\n\ndef auto_scale_lr(cfg, distributed, logger):\n    \"\"\"Automatically scaling LR according to GPU number and sample per GPU.\n\n    Args:\n        cfg (config): Training config.\n        distributed (bool): Using distributed or not.\n        logger (logging.Logger): Logger.\n    \"\"\"\n    # Get flag from config\n    if ('auto_scale_lr' not in cfg) or \\\n            (not cfg.auto_scale_lr.get('enable', False)):\n        logger.info('Automatic scaling of learning rate (LR)'\n                    ' has been disabled.')\n        return\n\n    # Get base batch size from config\n    base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None)\n    if base_batch_size is None:\n        return\n\n    # Get gpu number\n    if distributed:\n        _, world_size = get_dist_info()\n        num_gpus = len(range(world_size))\n    else:\n        num_gpus = len(cfg.gpu_ids)\n\n    # calculate the batch size\n    samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu\n    batch_size = num_gpus * samples_per_gpu\n    logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} '\n                f'samples per GPU. 
The total batch size is {batch_size}.')\n\n    if batch_size != base_batch_size:\n        # scale LR with\n        # [linear scaling rule](https://arxiv.org/abs/1706.02677)\n        scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr\n        logger.info('LR has been automatically scaled '\n                    f'from {cfg.optimizer.lr} to {scaled_lr}')\n        cfg.optimizer.lr = scaled_lr\n    else:\n        logger.info('The batch size matches the '\n                    f'base batch size: {base_batch_size}, '\n                    f'will not scale the LR ({cfg.optimizer.lr}).')\n\n\ndef train_detector(model,\n                   dataset,\n                   cfg,\n                   distributed=False,\n                   validate=False,\n                   timestamp=None,\n                   meta=None):\n\n    cfg = compat_cfg(cfg)\n    logger = get_root_logger(log_level=cfg.log_level)\n\n    # prepare data loaders\n    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]\n\n    runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[\n        'type']\n\n    train_dataloader_default_args = dict(\n        samples_per_gpu=2,\n        workers_per_gpu=2,\n        # `num_gpus` will be ignored if distributed\n        num_gpus=len(cfg.gpu_ids),\n        dist=distributed,\n        seed=cfg.seed,\n        runner_type=runner_type,\n        persistent_workers=False)\n\n    train_loader_cfg = {\n        **train_dataloader_default_args,\n        **cfg.data.get('train_dataloader', {})\n    }\n\n    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]\n\n    # put model on gpus\n    if distributed:\n        find_unused_parameters = cfg.get('find_unused_parameters', False)\n        # Sets the `find_unused_parameters` parameter in\n        # torch.nn.parallel.DistributedDataParallel\n        model = build_ddp(\n            model,\n            cfg.device,\n            device_ids=[int(os.environ['LOCAL_RANK'])],\n            broadcast_buffers=False,\n            find_unused_parameters=find_unused_parameters)\n    else:\n        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)\n\n    # build optimizer\n    auto_scale_lr(cfg, distributed, logger)\n    optimizer = build_optimizer(model, cfg.optimizer)\n\n    runner = build_runner(\n        cfg.runner,\n        default_args=dict(\n            model=model,\n            optimizer=optimizer,\n            work_dir=cfg.work_dir,\n            logger=logger,\n            meta=meta))\n\n    # an ugly workaround to make .log and .log.json filenames the same\n    runner.timestamp = timestamp\n\n    # fp16 setting\n    fp16_cfg = cfg.get('fp16', None)\n    if fp16_cfg is None and cfg.get('device', None) == 'npu':\n        fp16_cfg = dict(loss_scale='dynamic')\n    if fp16_cfg is not None:\n        optimizer_config = Fp16OptimizerHook(\n            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)\n    elif distributed and 'type' not in cfg.optimizer_config:\n        optimizer_config = OptimizerHook(**cfg.optimizer_config)\n    else:\n        optimizer_config = cfg.optimizer_config\n\n    # register hooks\n    runner.register_training_hooks(\n        cfg.lr_config,\n        optimizer_config,\n        cfg.checkpoint_config,\n        cfg.log_config,\n        cfg.get('momentum_config', None),\n        custom_hooks_config=cfg.get('custom_hooks', None))\n\n    if distributed:\n        if isinstance(runner, EpochBasedRunner):\n            runner.register_hook(DistSamplerSeedHook())\n\n    # register eval hooks\n    if validate:\n        val_dataloader_default_args = dict(\n            samples_per_gpu=1,\n            workers_per_gpu=2,\n            dist=distributed,\n            shuffle=False,\n            persistent_workers=False)\n\n        val_dataloader_args = {\n            **val_dataloader_default_args,\n            **cfg.data.get('val_dataloader', {})\n        }\n        # Support batch_size > 1 in validation\n\n        if val_dataloader_args['samples_per_gpu'] > 1:\n            # Replace 'ImageToTensor' with 'DefaultFormatBundle'\n            cfg.data.val.pipeline = replace_ImageToTensor(\n                cfg.data.val.pipeline)\n        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))\n\n        val_dataloader = build_dataloader(val_dataset, **val_dataloader_args)\n        eval_cfg = cfg.get('evaluation', {})\n        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'\n        eval_hook = DistEvalHook if distributed else EvalHook\n        # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the\n        # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.\n        runner.register_hook(\n            eval_hook(val_dataloader, **eval_cfg), priority='LOW')\n\n    resume_from = None\n    if cfg.resume_from is None and cfg.get('auto_resume'):\n        resume_from = find_latest_checkpoint(cfg.work_dir)\n    if resume_from is not None:\n        cfg.resume_from = resume_from\n\n    if cfg.resume_from:\n        runner.resume(cfg.resume_from)\n    elif cfg.load_from:\n        runner.load_checkpoint(cfg.load_from)\n    runner.run(data_loaders, cfg.workflow)\n"
  },
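`auto_scale_lr` in the file above applies the linear scaling rule: the configured LR is multiplied by the ratio of the actual batch size (GPUs x samples_per_gpu) to `base_batch_size`. A small sketch of that arithmetic with hypothetical numbers, independent of any config object:

base_batch_size = 16          # e.g. the config LR was tuned for 8 GPUs x 2 samples per GPU
num_gpus, samples_per_gpu = 4, 2
batch_size = num_gpus * samples_per_gpu
base_lr = 0.02
scaled_lr = (batch_size / base_batch_size) * base_lr
print(scaled_lr)              # 0.01: halving the batch size halves the LR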
  {
    "path": "mmdet/core/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor import *  # noqa: F401, F403\nfrom .bbox import *  # noqa: F401, F403\nfrom .data_structures import *  # noqa: F401, F403\nfrom .evaluation import *  # noqa: F401, F403\nfrom .hook import *  # noqa: F401, F403\nfrom .mask import *  # noqa: F401, F403\nfrom .optimizers import *  # noqa: F401, F403\nfrom .post_processing import *  # noqa: F401, F403\nfrom .utils import *  # noqa: F401, F403\n"
  },
  {
    "path": "mmdet/core/anchor/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,\n                               YOLOAnchorGenerator)\nfrom .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,\n                      build_anchor_generator, build_prior_generator)\nfrom .point_generator import MlvlPointGenerator, PointGenerator\nfrom .utils import anchor_inside_flags, calc_region, images_to_levels\n\n__all__ = [\n    'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',\n    'PointGenerator', 'images_to_levels', 'calc_region',\n    'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator',\n    'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator'\n]\n"
  },
  {
    "path": "mmdet/core/anchor/anchor_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\nfrom .builder import PRIOR_GENERATORS\n\n\n@PRIOR_GENERATORS.register_module()\nclass AnchorGenerator:\n    \"\"\"Standard anchor generator for 2D anchor-based detectors.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels in order (w, h).\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        scales (list[int] | None): Anchor scales for anchors in a single level.\n            It cannot be set at the same time if `octave_base_scale` and\n            `scales_per_octave` are set.\n        base_sizes (list[int] | None): The basic sizes\n            of anchors in multiple levels.\n            If None is given, strides will be used as base_sizes.\n            (If strides are non square, the shortest stride is taken.)\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. By default it is True in V2.0\n        octave_base_scale (int): The base scale of octave.\n        scales_per_octave (int): Number of scales for each octave.\n            `octave_base_scale` and `scales_per_octave` are usually used in\n            retinanet and the `scales` should be None when they are set.\n        centers (list[tuple[float, float]] | None): The centers of the anchor\n            relative to the feature grid center in multiple feature levels.\n            By default it is set to be None and not used. If a list of tuple of\n            float is given, they will be used to shift the centers of anchors.\n        center_offset (float): The offset of center in proportion to anchors'\n            width and height. 
By default it is 0 in V2.0.\n\n    Examples:\n        >>> from mmdet.core import AnchorGenerator\n        >>> self = AnchorGenerator([16], [1.], [1.], [9])\n        >>> all_anchors = self.grid_priors([(2, 2)], device='cpu')\n        >>> print(all_anchors)\n        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],\n                [11.5000, -4.5000, 20.5000,  4.5000],\n                [-4.5000, 11.5000,  4.5000, 20.5000],\n                [11.5000, 11.5000, 20.5000, 20.5000]])]\n        >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])\n        >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')\n        >>> print(all_anchors)\n        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],\n                [11.5000, -4.5000, 20.5000,  4.5000],\n                [-4.5000, 11.5000,  4.5000, 20.5000],\n                [11.5000, 11.5000, 20.5000, 20.5000]]), \\\n        tensor([[-9., -9., 9., 9.]])]\n    \"\"\"\n\n    def __init__(self,\n                 strides,\n                 ratios,\n                 scales=None,\n                 base_sizes=None,\n                 scale_major=True,\n                 octave_base_scale=None,\n                 scales_per_octave=None,\n                 centers=None,\n                 center_offset=0.):\n        # check center and center_offset\n        if center_offset != 0:\n            assert centers is None, 'center cannot be set when center_offset' \\\n                                    f'!=0, {centers} is given.'\n        if not (0 <= center_offset <= 1):\n            raise ValueError('center_offset should be in range [0, 1], '\n                             f'{center_offset} is given.')\n        if centers is not None:\n            assert len(centers) == len(strides), \\\n                'The number of strides should be the same as centers, got ' \\\n                f'{strides} and {centers}'\n\n        # calculate base sizes of anchors\n        self.strides = [_pair(stride) for stride in strides]\n        self.base_sizes = [min(stride) for stride in self.strides\n                           ] if base_sizes is None else base_sizes\n        assert len(self.base_sizes) == len(self.strides), \\\n            'The number of strides should be the same as base sizes, got ' \\\n            f'{self.strides} and {self.base_sizes}'\n\n        # calculate scales of anchors\n        assert ((octave_base_scale is not None\n                 and scales_per_octave is not None) ^ (scales is not None)), \\\n            'scales and octave_base_scale with scales_per_octave cannot' \\\n            ' be set at the same time'\n        if scales is not None:\n            self.scales = torch.Tensor(scales)\n        elif octave_base_scale is not None and scales_per_octave is not None:\n            octave_scales = np.array(\n                [2**(i / scales_per_octave) for i in range(scales_per_octave)])\n            scales = octave_scales * octave_base_scale\n            self.scales = torch.Tensor(scales)\n        else:\n            raise ValueError('Either scales or octave_base_scale with '\n                             'scales_per_octave should be set')\n\n        self.octave_base_scale = octave_base_scale\n        self.scales_per_octave = scales_per_octave\n        self.ratios = torch.Tensor(ratios)\n        self.scale_major = scale_major\n        self.centers = centers\n        self.center_offset = center_offset\n        self.base_anchors = self.gen_base_anchors()\n\n    @property\n    def num_base_anchors(self):\n        \"\"\"list[int]: total number of base 
anchors in a feature grid\"\"\"\n        return self.num_base_priors\n\n    @property\n    def num_base_priors(self):\n        \"\"\"list[int]: The number of priors (anchors) at a point\n        on the feature grid\"\"\"\n        return [base_anchors.size(0) for base_anchors in self.base_anchors]\n\n    @property\n    def num_levels(self):\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.strides)\n\n    def gen_base_anchors(self):\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_size in enumerate(self.base_sizes):\n            center = None\n            if self.centers is not None:\n                center = self.centers[i]\n            multi_level_base_anchors.append(\n                self.gen_single_level_base_anchors(\n                    base_size,\n                    scales=self.scales,\n                    ratios=self.ratios,\n                    center=center))\n        return multi_level_base_anchors\n\n    def gen_single_level_base_anchors(self,\n                                      base_size,\n                                      scales,\n                                      ratios,\n                                      center=None):\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature map.\n        \"\"\"\n        w = base_size\n        h = base_size\n        if center is None:\n            x_center = self.center_offset * w\n            y_center = self.center_offset * h\n        else:\n            x_center, y_center = center\n\n        h_ratios = torch.sqrt(ratios)\n        w_ratios = 1 / h_ratios\n        if self.scale_major:\n            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)\n            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)\n        else:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n\n        # use float anchor and the anchor's center is aligned with the\n        # pixel center\n        base_anchors = [\n            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,\n            y_center + 0.5 * hs\n        ]\n        base_anchors = torch.stack(base_anchors, dim=-1)\n\n        return base_anchors\n\n    def _meshgrid(self, x, y, row_major=True):\n        \"\"\"Generate mesh grid of x and y.\n\n        Args:\n            x (torch.Tensor): Grids of x dimension.\n            y (torch.Tensor): Grids of y dimension.\n            row_major (bool, optional): Whether to return y grids first.\n                Defaults to True.\n\n        Returns:\n            tuple[torch.Tensor]: The mesh grids of x and y.\n        \"\"\"\n        # use shape instead of len to keep tracing while exporting to onnx\n        xx = x.repeat(y.shape[0])\n        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):\n        \"\"\"Generate grid anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels.\n            dtype (:obj:`torch.dtype`): Dtype of priors.\n                Default: torch.float32.\n            device (str): The device where the anchors will be put on.\n\n        Return:\n            list[torch.Tensor]: Anchors in multiple feature levels. \\\n                The sizes of each tensor should be [N, 4], where \\\n                N = width * height * num_base_anchors, width and height \\\n                are the sizes of the corresponding feature level, \\\n                num_base_anchors is the number of anchors for that level.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_anchors = []\n        for i in range(self.num_levels):\n            anchors = self.single_level_grid_priors(\n                featmap_sizes[i], level_idx=i, dtype=dtype, device=device)\n            multi_level_anchors.append(anchors)\n        return multi_level_anchors\n\n    def single_level_grid_priors(self,\n                                 featmap_size,\n                                 level_idx,\n                                 dtype=torch.float32,\n                                 device='cuda'):\n        \"\"\"Generate grid anchors of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_priors``.\n\n        Args:\n            featmap_size (tuple[int]): Size of the feature maps.\n            level_idx (int): The index of corresponding feature map level.\n            dtype (obj:`torch.dtype`): Data type of points. Defaults to\n                ``torch.float32``.\n            device (str, optional): The device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: Anchors in the overall feature maps.\n        \"\"\"\n\n        base_anchors = self.base_anchors[level_idx].to(device).to(dtype)\n        feat_h, feat_w = featmap_size\n        stride_w, stride_h = self.strides[level_idx]\n        # First create Range with the default dtype, then convert to\n        # target `dtype` for onnx exporting.\n        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w\n        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\n        # first feat_w elements correspond to the first row of shifts\n        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n        # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n        all_anchors = all_anchors.view(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        return all_anchors\n\n    def sparse_priors(self,\n                      prior_idxs,\n                      featmap_size,\n                      level_idx,\n                      dtype=torch.float32,\n                      device='cuda'):\n        \"\"\"Generate sparse anchors according to the ``prior_idxs``.\n\n        Args:\n            prior_idxs (Tensor): The index of corresponding anchors\n                in the feature map.\n            featmap_size (tuple[int]): feature map size arranged as (h, w).\n            level_idx (int): The level index of corresponding feature\n                map.\n            dtype (obj:`torch.dtype`): Data type of points. Defaults to\n                ``torch.float32``.\n            device (obj:`torch.device`): The device where the points are\n                located.\n        Returns:\n            Tensor: Anchor with shape (N, 4), N should be equal to\n                the length of ``prior_idxs``.\n        \"\"\"\n\n        height, width = featmap_size\n        
num_base_anchors = self.num_base_anchors[level_idx]\n        base_anchor_id = prior_idxs % num_base_anchors\n        x = (prior_idxs //\n             num_base_anchors) % width * self.strides[level_idx][0]\n        y = (prior_idxs // width //\n             num_base_anchors) % height * self.strides[level_idx][1]\n        priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \\\n            self.base_anchors[level_idx][base_anchor_id, :].to(device)\n\n        return priors\n\n    def grid_anchors(self, featmap_sizes, device='cuda'):\n        \"\"\"Generate grid anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels.\n            device (str): Device where the anchors will be put on.\n\n        Return:\n            list[torch.Tensor]: Anchors in multiple feature levels. \\\n                The sizes of each tensor should be [N, 4], where \\\n                N = width * height * num_base_anchors, width and height \\\n                are the sizes of the corresponding feature level, \\\n                num_base_anchors is the number of anchors for that level.\n        \"\"\"\n        warnings.warn('``grid_anchors`` would be deprecated soon. '\n                      'Please use ``grid_priors`` ')\n\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_anchors = []\n        for i in range(self.num_levels):\n            anchors = self.single_level_grid_anchors(\n                self.base_anchors[i].to(device),\n                featmap_sizes[i],\n                self.strides[i],\n                device=device)\n            multi_level_anchors.append(anchors)\n        return multi_level_anchors\n\n    def single_level_grid_anchors(self,\n                                  base_anchors,\n                                  featmap_size,\n                                  stride=(16, 16),\n                                  device='cuda'):\n        \"\"\"Generate grid anchors of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_anchors``.\n\n        Args:\n            base_anchors (torch.Tensor): The base anchors of a feature grid.\n            featmap_size (tuple[int]): Size of the feature maps.\n            stride (tuple[int], optional): Stride of the feature map in order\n                (w, h). Defaults to (16, 16).\n            device (str, optional): Device the tensor will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: Anchors in the overall feature maps.\n        \"\"\"\n\n        warnings.warn(\n            '``single_level_grid_anchors`` would be deprecated soon. 
'\n            'Please use ``single_level_grid_priors`` ')\n\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        feat_h, feat_w = featmap_size\n        shift_x = torch.arange(0, feat_w, device=device) * stride[0]\n        shift_y = torch.arange(0, feat_h, device=device) * stride[1]\n\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)\n        shifts = shifts.type_as(base_anchors)\n        # first feat_w elements correspond to the first row of shifts\n        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get\n        # shifted anchors (K, A, 4), reshape to (K*A, 4)\n\n        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]\n        all_anchors = all_anchors.view(-1, 4)\n        # first A rows correspond to A anchors of (0, 0) in feature map,\n        # then (0, 1), (0, 2), ...\n        return all_anchors\n\n    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):\n        \"\"\"Generate valid flags of anchors in multiple feature levels.\n\n        Args:\n            featmap_sizes (list(tuple)): List of feature map sizes in\n                multiple feature levels.\n            pad_shape (tuple): The padded shape of the image.\n            device (str): Device where the anchors will be put on.\n\n        Return:\n            list(torch.Tensor): Valid flags of anchors in multiple levels.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_flags = []\n        for i in range(self.num_levels):\n            anchor_stride = self.strides[i]\n            feat_h, feat_w = featmap_sizes[i]\n            h, w = pad_shape[:2]\n            valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h)\n            valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w)\n            flags = self.single_level_valid_flags((feat_h, feat_w),\n                                                  (valid_feat_h, valid_feat_w),\n                                                  self.num_base_anchors[i],\n                                                  device=device)\n            multi_level_flags.append(flags)\n        return multi_level_flags\n\n    def single_level_valid_flags(self,\n                                 featmap_size,\n                                 valid_size,\n                                 num_base_anchors,\n                                 device='cuda'):\n        \"\"\"Generate the valid flags of anchor in a single feature map.\n\n        Args:\n            featmap_size (tuple[int]): The size of feature maps, arrange\n                as (h, w).\n            valid_size (tuple[int]): The valid size of the feature maps.\n            num_base_anchors (int): The number of base anchors.\n            device (str, optional): Device where the flags will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: The valid flags of each anchor in a single level \\\n                feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n 
       valid = valid[:, None].expand(valid.size(0),\n                                      num_base_anchors).contiguous().view(-1)\n        return valid\n\n    def __repr__(self):\n        \"\"\"str: a string that describes the module\"\"\"\n        indent_str = '    '\n        repr_str = self.__class__.__name__ + '(\\n'\n        repr_str += f'{indent_str}strides={self.strides},\\n'\n        repr_str += f'{indent_str}ratios={self.ratios},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n        repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n        repr_str += f'{indent_str}octave_base_scale='\n        repr_str += f'{self.octave_base_scale},\\n'\n        repr_str += f'{indent_str}scales_per_octave='\n        repr_str += f'{self.scales_per_octave},\\n'\n        repr_str += f'{indent_str}num_levels={self.num_levels}\\n'\n        repr_str += f'{indent_str}centers={self.centers},\\n'\n        repr_str += f'{indent_str}center_offset={self.center_offset})'\n        return repr_str\n\n\n@PRIOR_GENERATORS.register_module()\nclass SSDAnchorGenerator(AnchorGenerator):\n    \"\"\"Anchor generator for SSD.\n\n    Args:\n        strides (list[int]  | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels.\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        min_sizes (list[float]): The list of minimum anchor sizes on each\n            level.\n        max_sizes (list[float]): The list of maximum anchor sizes on each\n            level.\n        basesize_ratio_range (tuple(float)): Ratio range of anchors. Being\n            used when not setting min_sizes and max_sizes.\n        input_size (int): Size of feature map, 300 for SSD300, 512 for\n            SSD512. Being used when not setting min_sizes and max_sizes.\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. 
It is always set to be False in SSD.\n    \"\"\"\n\n    def __init__(self,\n                 strides,\n                 ratios,\n                 min_sizes=None,\n                 max_sizes=None,\n                 basesize_ratio_range=(0.15, 0.9),\n                 input_size=300,\n                 scale_major=True):\n        assert len(strides) == len(ratios)\n        assert not (min_sizes is None) ^ (max_sizes is None)\n        self.strides = [_pair(stride) for stride in strides]\n        self.centers = [(stride[0] / 2., stride[1] / 2.)\n                        for stride in self.strides]\n\n        if min_sizes is None and max_sizes is None:\n            # use hard code to generate SSD anchors\n            self.input_size = input_size\n            assert mmcv.is_tuple_of(basesize_ratio_range, float)\n            self.basesize_ratio_range = basesize_ratio_range\n            # calculate anchor ratios and sizes\n            min_ratio, max_ratio = basesize_ratio_range\n            min_ratio = int(min_ratio * 100)\n            max_ratio = int(max_ratio * 100)\n            step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))\n            min_sizes = []\n            max_sizes = []\n            for ratio in range(int(min_ratio), int(max_ratio) + 1, step):\n                min_sizes.append(int(self.input_size * ratio / 100))\n                max_sizes.append(int(self.input_size * (ratio + step) / 100))\n            if self.input_size == 300:\n                if basesize_ratio_range[0] == 0.15:  # SSD300 COCO\n                    min_sizes.insert(0, int(self.input_size * 7 / 100))\n                    max_sizes.insert(0, int(self.input_size * 15 / 100))\n                elif basesize_ratio_range[0] == 0.2:  # SSD300 VOC\n                    min_sizes.insert(0, int(self.input_size * 10 / 100))\n                    max_sizes.insert(0, int(self.input_size * 20 / 100))\n                else:\n                    raise ValueError(\n                        'basesize_ratio_range[0] should be either 0.15'\n                        'or 0.2 when input_size is 300, got '\n                        f'{basesize_ratio_range[0]}.')\n            elif self.input_size == 512:\n                if basesize_ratio_range[0] == 0.1:  # SSD512 COCO\n                    min_sizes.insert(0, int(self.input_size * 4 / 100))\n                    max_sizes.insert(0, int(self.input_size * 10 / 100))\n                elif basesize_ratio_range[0] == 0.15:  # SSD512 VOC\n                    min_sizes.insert(0, int(self.input_size * 7 / 100))\n                    max_sizes.insert(0, int(self.input_size * 15 / 100))\n                else:\n                    raise ValueError(\n                        'When not setting min_sizes and max_sizes,'\n                        'basesize_ratio_range[0] should be either 0.1'\n                        'or 0.15 when input_size is 512, got'\n                        f' {basesize_ratio_range[0]}.')\n            else:\n                raise ValueError(\n                    'Only support 300 or 512 in SSDAnchorGenerator when '\n                    'not setting min_sizes and max_sizes, '\n                    f'got {self.input_size}.')\n\n        assert len(min_sizes) == len(max_sizes) == len(strides)\n\n        anchor_ratios = []\n        anchor_scales = []\n        for k in range(len(self.strides)):\n            scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]\n            anchor_ratio = [1.]\n            for r in ratios[k]:\n                anchor_ratio += [1 / r, r]  # 4 or 6 ratio\n 
           anchor_ratios.append(torch.Tensor(anchor_ratio))\n            anchor_scales.append(torch.Tensor(scales))\n\n        self.base_sizes = min_sizes\n        self.scales = anchor_scales\n        self.ratios = anchor_ratios\n        self.scale_major = scale_major\n        self.center_offset = 0\n        self.base_anchors = self.gen_base_anchors()\n\n    def gen_base_anchors(self):\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_size in enumerate(self.base_sizes):\n            base_anchors = self.gen_single_level_base_anchors(\n                base_size,\n                scales=self.scales[i],\n                ratios=self.ratios[i],\n                center=self.centers[i])\n            indices = list(range(len(self.ratios[i])))\n            indices.insert(1, len(indices))\n            base_anchors = torch.index_select(base_anchors, 0,\n                                              torch.LongTensor(indices))\n            multi_level_base_anchors.append(base_anchors)\n        return multi_level_base_anchors\n\n    def __repr__(self):\n        \"\"\"str: a string that describes the module\"\"\"\n        indent_str = '    '\n        repr_str = self.__class__.__name__ + '(\\n'\n        repr_str += f'{indent_str}strides={self.strides},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}scale_major={self.scale_major},\\n'\n        repr_str += f'{indent_str}input_size={self.input_size},\\n'\n        repr_str += f'{indent_str}scales={self.scales},\\n'\n        repr_str += f'{indent_str}ratios={self.ratios},\\n'\n        repr_str += f'{indent_str}num_levels={self.num_levels},\\n'\n        repr_str += f'{indent_str}base_sizes={self.base_sizes},\\n'\n        repr_str += f'{indent_str}basesize_ratio_range='\n        repr_str += f'{self.basesize_ratio_range})'\n        return repr_str\n\n\n@PRIOR_GENERATORS.register_module()\nclass LegacyAnchorGenerator(AnchorGenerator):\n    \"\"\"Legacy anchor generator used in MMDetection V1.x.\n\n    Note:\n        Difference to the V2.0 anchor generator:\n\n        1. The center offset of V1.x anchors are set to be 0.5 rather than 0.\n        2. The width/height are minused by 1 when calculating the anchors' \\\n            centers and corners to meet the V1.x coordinate system.\n        3. The anchors' corners are quantized.\n\n    Args:\n        strides (list[int] | list[tuple[int]]): Strides of anchors\n            in multiple feature levels.\n        ratios (list[float]): The list of ratios between the height and width\n            of anchors in a single level.\n        scales (list[int] | None): Anchor scales for anchors in a single level.\n            It cannot be set at the same time if `octave_base_scale` and\n            `scales_per_octave` are set.\n        base_sizes (list[int]): The basic sizes of anchors in multiple levels.\n            If None is given, strides will be used to generate base_sizes.\n        scale_major (bool): Whether to multiply scales first when generating\n            base anchors. If true, the anchors in the same row will have the\n            same scales. 
By default it is True in V2.0\n        octave_base_scale (int): The base scale of octave.\n        scales_per_octave (int): Number of scales for each octave.\n            `octave_base_scale` and `scales_per_octave` are usually used in\n            retinanet and the `scales` should be None when they are set.\n        centers (list[tuple[float, float]] | None): The centers of the anchor\n            relative to the feature grid center in multiple feature levels.\n            By default it is set to be None and not used. If a list of float\n            is given, this list will be used to shift the centers of anchors.\n        center_offset (float): The offset of center in proportion to anchors'\n            width and height. By default it is 0 in V2.0 but it should be 0.5\n            in v1.x models.\n\n    Examples:\n        >>> from mmdet.core import LegacyAnchorGenerator\n        >>> self = LegacyAnchorGenerator(\n        >>>     [16], [1.], [1.], [9], center_offset=0.5)\n        >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')\n        >>> print(all_anchors)\n        [tensor([[ 0.,  0.,  8.,  8.],\n                [16.,  0., 24.,  8.],\n                [ 0., 16.,  8., 24.],\n                [16., 16., 24., 24.]])]\n    \"\"\"\n\n    def gen_single_level_base_anchors(self,\n                                      base_size,\n                                      scales,\n                                      ratios,\n                                      center=None):\n        \"\"\"Generate base anchors of a single level.\n\n        Note:\n            The width/height of anchors are reduced by 1 when calculating \\\n                the centers and corners to meet the V1.x coordinate system.\n\n        Args:\n            base_size (int | float): Basic size of an anchor.\n            scales (torch.Tensor): Scales of the anchor.\n            ratios (torch.Tensor): The ratio between the height\n                and width of anchors in a single level.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. 
Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature map.\n        \"\"\"\n        w = base_size\n        h = base_size\n        if center is None:\n            x_center = self.center_offset * (w - 1)\n            y_center = self.center_offset * (h - 1)\n        else:\n            x_center, y_center = center\n\n        h_ratios = torch.sqrt(ratios)\n        w_ratios = 1 / h_ratios\n        if self.scale_major:\n            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)\n            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)\n        else:\n            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)\n            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)\n\n        # use float anchor and the anchor's center is aligned with the\n        # pixel center\n        base_anchors = [\n            x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1),\n            x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1)\n        ]\n        base_anchors = torch.stack(base_anchors, dim=-1).round()\n\n        return base_anchors\n\n\n@PRIOR_GENERATORS.register_module()\nclass LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):\n    \"\"\"Legacy anchor generator used in MMDetection V1.x.\n\n    The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`\n    can be found in `LegacyAnchorGenerator`.\n    \"\"\"\n\n    def __init__(self,\n                 strides,\n                 ratios,\n                 basesize_ratio_range,\n                 input_size=300,\n                 scale_major=True):\n        super(LegacySSDAnchorGenerator, self).__init__(\n            strides=strides,\n            ratios=ratios,\n            basesize_ratio_range=basesize_ratio_range,\n            input_size=input_size,\n            scale_major=scale_major)\n        self.centers = [((stride - 1) / 2., (stride - 1) / 2.)\n                        for stride in strides]\n        self.base_anchors = self.gen_base_anchors()\n\n\n@PRIOR_GENERATORS.register_module()\nclass YOLOAnchorGenerator(AnchorGenerator):\n    \"\"\"Anchor generator for YOLO.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels.\n        base_sizes (list[list[tuple[int, int]]]): The basic sizes\n            of anchors in multiple levels.\n    \"\"\"\n\n    def __init__(self, strides, base_sizes):\n        self.strides = [_pair(stride) for stride in strides]\n        self.centers = [(stride[0] / 2., stride[1] / 2.)\n                        for stride in self.strides]\n        self.base_sizes = []\n        num_anchor_per_level = len(base_sizes[0])\n        for base_sizes_per_level in base_sizes:\n            assert num_anchor_per_level == len(base_sizes_per_level)\n            self.base_sizes.append(\n                [_pair(base_size) for base_size in base_sizes_per_level])\n        self.base_anchors = self.gen_base_anchors()\n\n    @property\n    def num_levels(self):\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.base_sizes)\n\n    def gen_base_anchors(self):\n        \"\"\"Generate base anchors.\n\n        Returns:\n            list(torch.Tensor): Base anchors of a feature grid in multiple \\\n                feature levels.\n        \"\"\"\n        multi_level_base_anchors = []\n        for i, base_sizes_per_level in enumerate(self.base_sizes):\n            center = None\n            if self.centers is not 
None:\n                center = self.centers[i]\n            multi_level_base_anchors.append(\n                self.gen_single_level_base_anchors(base_sizes_per_level,\n                                                   center))\n        return multi_level_base_anchors\n\n    def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):\n        \"\"\"Generate base anchors of a single level.\n\n        Args:\n            base_sizes_per_level (list[tuple[int, int]]): Basic sizes of\n                anchors.\n            center (tuple[float], optional): The center of the base anchor\n                related to a single feature grid. Defaults to None.\n\n        Returns:\n            torch.Tensor: Anchors in a single-level feature maps.\n        \"\"\"\n        x_center, y_center = center\n        base_anchors = []\n        for base_size in base_sizes_per_level:\n            w, h = base_size\n\n            # use float anchor and the anchor's center is aligned with the\n            # pixel center\n            base_anchor = torch.Tensor([\n                x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w,\n                y_center + 0.5 * h\n            ])\n            base_anchors.append(base_anchor)\n        base_anchors = torch.stack(base_anchors, dim=0)\n\n        return base_anchors\n\n    def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):\n        \"\"\"Generate responsible anchor flags of grid cells in multiple scales.\n\n        Args:\n            featmap_sizes (list(tuple)): List of feature map sizes in multiple\n                feature levels.\n            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).\n            device (str): Device where the anchors will be put on.\n\n        Return:\n            list(torch.Tensor): responsible flags of anchors in multiple level\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_responsible_flags = []\n        for i in range(self.num_levels):\n            anchor_stride = self.strides[i]\n            flags = self.single_level_responsible_flags(\n                featmap_sizes[i],\n                gt_bboxes,\n                anchor_stride,\n                self.num_base_anchors[i],\n                device=device)\n            multi_level_responsible_flags.append(flags)\n        return multi_level_responsible_flags\n\n    def single_level_responsible_flags(self,\n                                       featmap_size,\n                                       gt_bboxes,\n                                       stride,\n                                       num_base_anchors,\n                                       device='cuda'):\n        \"\"\"Generate the responsible flags of anchor in a single feature map.\n\n        Args:\n            featmap_size (tuple[int]): The size of feature maps.\n            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).\n            stride (tuple(int)): stride of current level\n            num_base_anchors (int): The number of base anchors.\n            device (str, optional): Device where the flags will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: The valid flags of each anchor in a single level \\\n                feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)\n        gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)\n        gt_bboxes_grid_x = 
torch.floor(gt_bboxes_cx / stride[0]).long()\n        gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long()\n\n        # row major indexing\n        gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x\n\n        responsible_grid = torch.zeros(\n            feat_h * feat_w, dtype=torch.uint8, device=device)\n        responsible_grid[gt_bboxes_grid_idx] = 1\n\n        responsible_grid = responsible_grid[:, None].expand(\n            responsible_grid.size(0), num_base_anchors).contiguous().view(-1)\n        return responsible_grid\n"
  },
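As a usage sketch for the generators defined above (it assumes mmdet and torch are importable), the number of priors returned by `grid_priors` per level is feat_h * feat_w * num_base_priors:

from mmdet.core import AnchorGenerator

gen = AnchorGenerator(strides=[8, 16], ratios=[0.5, 1.0, 2.0], scales=[8])
featmap_sizes = [(4, 4), (2, 2)]
anchors = gen.grid_priors(featmap_sizes, device='cpu')
for (h, w), level_anchors, num_base in zip(featmap_sizes, anchors, gen.num_base_priors):
    assert level_anchors.shape == (h * w * num_base, 4)  # 3 ratios x 1 scale = 3 anchors per location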
  {
    "path": "mmdet/core/anchor/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nfrom mmcv.utils import Registry, build_from_cfg\n\nPRIOR_GENERATORS = Registry('Generator for anchors and points')\n\nANCHOR_GENERATORS = PRIOR_GENERATORS\n\n\ndef build_prior_generator(cfg, default_args=None):\n    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args)\n\n\ndef build_anchor_generator(cfg, default_args=None):\n    warnings.warn(\n        '``build_anchor_generator`` would be deprecated soon, please use '\n        '``build_prior_generator`` ')\n    return build_prior_generator(cfg, default_args=default_args)\n"
  },
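A short sketch of how a config dict is turned into a generator through this registry (assumes mmdet is installed; the config values are arbitrary):

from mmdet.core.anchor import build_prior_generator

prior_generator = build_prior_generator(
    dict(type='AnchorGenerator', strides=[8], ratios=[1.0], scales=[8]))
print(prior_generator.num_base_priors)  # [1]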
  {
    "path": "mmdet/core/anchor/point_generator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\nfrom .builder import PRIOR_GENERATORS\n\n\n@PRIOR_GENERATORS.register_module()\nclass PointGenerator:\n\n    def _meshgrid(self, x, y, row_major=True):\n        xx = x.repeat(len(y))\n        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)\n        if row_major:\n            return xx, yy\n        else:\n            return yy, xx\n\n    def grid_points(self, featmap_size, stride=16, device='cuda'):\n        feat_h, feat_w = featmap_size\n        shift_x = torch.arange(0., feat_w, device=device) * stride\n        shift_y = torch.arange(0., feat_h, device=device) * stride\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        stride = shift_x.new_full((shift_xx.shape[0], ), stride)\n        shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1)\n        all_points = shifts.to(device)\n        return all_points\n\n    def valid_flags(self, featmap_size, valid_size, device='cuda'):\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        return valid\n\n\n@PRIOR_GENERATORS.register_module()\nclass MlvlPointGenerator:\n    \"\"\"Standard points generator for multi-level (Mlvl) feature maps in 2D\n    points-based detectors.\n\n    Args:\n        strides (list[int] | list[tuple[int, int]]): Strides of anchors\n            in multiple feature levels in order (w, h).\n        offset (float): The offset of points, the value is normalized with\n            corresponding stride. Defaults to 0.5.\n    \"\"\"\n\n    def __init__(self, strides, offset=0.5):\n        self.strides = [_pair(stride) for stride in strides]\n        self.offset = offset\n\n    @property\n    def num_levels(self):\n        \"\"\"int: number of feature levels that the generator will be applied\"\"\"\n        return len(self.strides)\n\n    @property\n    def num_base_priors(self):\n        \"\"\"list[int]: The number of priors (points) at a point\n        on the feature grid\"\"\"\n        return [1 for _ in range(len(self.strides))]\n\n    def _meshgrid(self, x, y, row_major=True):\n        yy, xx = torch.meshgrid(y, x)\n        if row_major:\n            # warning .flatten() would cause error in ONNX exporting\n            # have to use reshape here\n            return xx.reshape(-1), yy.reshape(-1)\n\n        else:\n            return yy.reshape(-1), xx.reshape(-1)\n\n    def grid_priors(self,\n                    featmap_sizes,\n                    dtype=torch.float32,\n                    device='cuda',\n                    with_stride=False):\n        \"\"\"Generate grid points of multiple feature levels.\n\n        Args:\n            featmap_sizes (list[tuple]): List of feature map sizes in\n                multiple feature levels, each size arrange as\n                as (h, w).\n            dtype (:obj:`dtype`): Dtype of priors. 
Default: torch.float32.\n            device (str): The device where the anchors will be put on.\n            with_stride (bool): Whether to concatenate the stride to\n                the last dimension of points.\n\n        Return:\n            list[torch.Tensor]: Points of multiple feature levels.\n            The sizes of each tensor should be (N, 2) when with stride is\n            ``False``, where N = width * height, width and height\n            are the sizes of the corresponding feature level,\n            and the last dimension 2 represent (coord_x, coord_y),\n            otherwise the shape should be (N, 4),\n            and the last dimension 4 represent\n            (coord_x, coord_y, stride_w, stride_h).\n        \"\"\"\n\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_priors = []\n        for i in range(self.num_levels):\n            priors = self.single_level_grid_priors(\n                featmap_sizes[i],\n                level_idx=i,\n                dtype=dtype,\n                device=device,\n                with_stride=with_stride)\n            multi_level_priors.append(priors)\n        return multi_level_priors\n\n    def single_level_grid_priors(self,\n                                 featmap_size,\n                                 level_idx,\n                                 dtype=torch.float32,\n                                 device='cuda',\n                                 with_stride=False):\n        \"\"\"Generate grid Points of a single level.\n\n        Note:\n            This function is usually called by method ``self.grid_priors``.\n\n        Args:\n            featmap_size (tuple[int]): Size of the feature maps, arranged as\n                (h, w).\n            level_idx (int): The index of corresponding feature map level.\n            dtype (:obj:`dtype`): Dtype of priors. 
Default: torch.float32.\n            device (str, optional): The device the tensor will be put on.\n                Defaults to 'cuda'.\n            with_stride (bool): Concatenate the stride to the last dimension\n                of points.\n\n        Return:\n            Tensor: Points of single feature levels.\n            The shape of tensor should be (N, 2) when with stride is\n            ``False``, where N = width * height, width and height\n            are the sizes of the corresponding feature level,\n            and the last dimension 2 represent (coord_x, coord_y),\n            otherwise the shape should be (N, 4),\n            and the last dimension 4 represent\n            (coord_x, coord_y, stride_w, stride_h).\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        stride_w, stride_h = self.strides[level_idx]\n        shift_x = (torch.arange(0, feat_w, device=device) +\n                   self.offset) * stride_w\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        shift_x = shift_x.to(dtype)\n\n        shift_y = (torch.arange(0, feat_h, device=device) +\n                   self.offset) * stride_h\n        # keep featmap_size as Tensor instead of int, so that we\n        # can convert to ONNX correctly\n        shift_y = shift_y.to(dtype)\n        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)\n        if not with_stride:\n            shifts = torch.stack([shift_xx, shift_yy], dim=-1)\n        else:\n            # use `shape[0]` instead of `len(shift_xx)` for ONNX export\n            stride_w = shift_xx.new_full((shift_xx.shape[0], ),\n                                         stride_w).to(dtype)\n            stride_h = shift_xx.new_full((shift_yy.shape[0], ),\n                                         stride_h).to(dtype)\n            shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h],\n                                 dim=-1)\n        all_points = shifts.to(device)\n        return all_points\n\n    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):\n        \"\"\"Generate valid flags of points of multiple feature levels.\n\n        Args:\n            featmap_sizes (list(tuple)): List of feature map sizes in\n                multiple feature levels, each size arrange as\n                as (h, w).\n            pad_shape (tuple(int)): The padded shape of the image,\n                 arrange as (h, w).\n            device (str): The device where the anchors will be put on.\n\n        Return:\n            list(torch.Tensor): Valid flags of points of multiple levels.\n        \"\"\"\n        assert self.num_levels == len(featmap_sizes)\n        multi_level_flags = []\n        for i in range(self.num_levels):\n            point_stride = self.strides[i]\n            feat_h, feat_w = featmap_sizes[i]\n            h, w = pad_shape[:2]\n            valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h)\n            valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w)\n            flags = self.single_level_valid_flags((feat_h, feat_w),\n                                                  (valid_feat_h, valid_feat_w),\n                                                  device=device)\n            multi_level_flags.append(flags)\n        return multi_level_flags\n\n    def single_level_valid_flags(self,\n                                 featmap_size,\n                                 valid_size,\n                                 device='cuda'):\n        \"\"\"Generate the 
valid flags of points of a single feature map.\n\n        Args:\n            featmap_size (tuple[int]): The size of feature maps, arranged as\n                (h, w).\n            valid_size (tuple[int]): The valid size of the feature maps.\n                The size is arranged as (h, w).\n            device (str, optional): The device where the flags will be put on.\n                Defaults to 'cuda'.\n\n        Returns:\n            torch.Tensor: The valid flags of each point in a single level \\\n                feature map.\n        \"\"\"\n        feat_h, feat_w = featmap_size\n        valid_h, valid_w = valid_size\n        assert valid_h <= feat_h and valid_w <= feat_w\n        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)\n        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)\n        valid_x[:valid_w] = 1\n        valid_y[:valid_h] = 1\n        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)\n        valid = valid_xx & valid_yy\n        return valid\n\n    def sparse_priors(self,\n                      prior_idxs,\n                      featmap_size,\n                      level_idx,\n                      dtype=torch.float32,\n                      device='cuda'):\n        \"\"\"Generate sparse points according to the ``prior_idxs``.\n\n        Args:\n            prior_idxs (Tensor): The index of corresponding anchors\n                in the feature map.\n            featmap_size (tuple[int]): feature map size arranged as (h, w).\n            level_idx (int): The level index of corresponding feature\n                map.\n            dtype (obj:`torch.dtype`): Data type of points. Defaults to\n                ``torch.float32``.\n            device (obj:`torch.device`): The device where the points are\n                located.\n        Returns:\n            Tensor: Anchor with shape (N, 2), N should be equal to\n            the length of ``prior_idxs``. And last dimension\n            2 represent (coord_x, coord_y).\n        \"\"\"\n        height, width = featmap_size\n        x = (prior_idxs % width + self.offset) * self.strides[level_idx][0]\n        y = ((prior_idxs // width) % height +\n             self.offset) * self.strides[level_idx][1]\n        priors = torch.stack([x, y], 1).to(dtype)\n        priors = priors.to(device)\n        return priors\n"
  },
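A usage sketch for `MlvlPointGenerator` from the file above (assumes mmdet and torch are importable); with `with_stride=True` each point carries its (stride_w, stride_h) in the last two columns:

import torch
from mmdet.core import MlvlPointGenerator

gen = MlvlPointGenerator(strides=[8, 16], offset=0.5)
points = gen.grid_priors([(2, 2), (1, 1)], device='cpu', with_stride=True)
assert points[0].shape == (4, 4)  # (h * w, 4): coord_x, coord_y, stride_w, stride_h
assert torch.allclose(points[1][0], torch.tensor([8., 8., 16., 16.]))  # (0 + 0.5) * 16 = 8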
  {
    "path": "mmdet/core/anchor/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef images_to_levels(target, num_levels):\n    \"\"\"Convert targets by image to targets by feature level.\n\n    [target_img0, target_img1] -> [target_level0, target_level1, ...]\n    \"\"\"\n    target = torch.stack(target, 0)\n    level_targets = []\n    start = 0\n    for n in num_levels:\n        end = start + n\n        # level_targets.append(target[:, start:end].squeeze(0))\n        level_targets.append(target[:, start:end])\n        start = end\n    return level_targets\n\n\ndef anchor_inside_flags(flat_anchors,\n                        valid_flags,\n                        img_shape,\n                        allowed_border=0):\n    \"\"\"Check whether the anchors are inside the border.\n\n    Args:\n        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4).\n        valid_flags (torch.Tensor): An existing valid flags of anchors.\n        img_shape (tuple(int)): Shape of current image.\n        allowed_border (int, optional): The border to allow the valid anchor.\n            Defaults to 0.\n\n    Returns:\n        torch.Tensor: Flags indicating whether the anchors are inside a \\\n            valid range.\n    \"\"\"\n    img_h, img_w = img_shape[:2]\n    if allowed_border >= 0:\n        inside_flags = valid_flags & \\\n            (flat_anchors[:, 0] >= -allowed_border) & \\\n            (flat_anchors[:, 1] >= -allowed_border) & \\\n            (flat_anchors[:, 2] < img_w + allowed_border) & \\\n            (flat_anchors[:, 3] < img_h + allowed_border)\n    else:\n        inside_flags = valid_flags\n    return inside_flags\n\n\ndef calc_region(bbox, ratio, featmap_size=None):\n    \"\"\"Calculate a proportional bbox region.\n\n    The bbox center are fixed and the new h' and w' is h * ratio and w * ratio.\n\n    Args:\n        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).\n        ratio (float): Ratio of the output region.\n        featmap_size (tuple): Feature map size used for clipping the boundary.\n\n    Returns:\n        tuple: x1, y1, x2, y2\n    \"\"\"\n    x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long()\n    y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long()\n    x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long()\n    y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long()\n    if featmap_size is not None:\n        x1 = x1.clamp(min=0, max=featmap_size[1])\n        y1 = y1.clamp(min=0, max=featmap_size[0])\n        x2 = x2.clamp(min=0, max=featmap_size[1])\n        y2 = y2.clamp(min=0, max=featmap_size[0])\n    return (x1, y1, x2, y2)\n"
  },
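The helpers above are small enough to exercise end to end. The sketch below (toy anchors and a made-up 100x100 image, assuming mmdet is importable) shows `anchor_inside_flags` filtering anchors that stick out of the image and `calc_region` shrinking a box around its fixed center.

```python
import torch
from mmdet.core.anchor.utils import anchor_inside_flags, calc_region

flat_anchors = torch.tensor([[  0.,   0.,  32.,  32.],
                             [ 90.,  90., 130., 130.],   # crosses the border
                             [-10.,   5.,  20.,  30.]])  # starts outside
valid_flags = torch.ones(3, dtype=torch.bool)
img_shape = (100, 100, 3)  # (h, w, c)

# Keep only anchors fully inside the 100x100 image (no extra border allowed).
print(anchor_inside_flags(flat_anchors, valid_flags, img_shape,
                          allowed_border=0))
# tensor([ True, False, False])

# Shrink a 40x40 box to its central region; the center stays at (60, 60).
bbox = torch.tensor([40., 40., 80., 80.])
print(calc_region(bbox, ratio=0.25))
# (tensor(50), tensor(50), tensor(70), tensor(70))
```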
  {
    "path": "mmdet/core/bbox/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,\n                        MaxIoUAssigner, RegionAssigner)\nfrom .builder import build_assigner, build_bbox_coder, build_sampler\nfrom .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,\n                    PseudoBBoxCoder, TBLRBBoxCoder)\nfrom .iou_calculators import BboxOverlaps2D, bbox_overlaps\nfrom .samplers import (BaseSampler, CombinedSampler,\n                       InstanceBalancedPosSampler, IoUBalancedNegSampler,\n                       OHEMSampler, PseudoSampler, RandomSampler,\n                       SamplingResult, ScoreHLRSampler)\nfrom .transforms import (bbox2distance, bbox2result, bbox2roi,\n                         bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,\n                         bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,\n                         distance2bbox, find_inside_bboxes, roi2bbox)\n\n__all__ = [\n    'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',\n    'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',\n    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',\n    'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',\n    'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',\n    'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',\n    'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',\n    'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',\n    'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes'\n]\n"
  },
  {
    "path": "mmdet/core/bbox/assigners/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .approx_max_iou_assigner import ApproxMaxIoUAssigner\nfrom .ascend_assign_result import AscendAssignResult\nfrom .ascend_max_iou_assigner import AscendMaxIoUAssigner\nfrom .assign_result import AssignResult\nfrom .atss_assigner import ATSSAssigner\nfrom .base_assigner import BaseAssigner\nfrom .center_region_assigner import CenterRegionAssigner\nfrom .grid_assigner import GridAssigner\nfrom .hungarian_assigner import HungarianAssigner\nfrom .mask_hungarian_assigner import MaskHungarianAssigner\nfrom .max_iou_assigner import MaxIoUAssigner\nfrom .point_assigner import PointAssigner\nfrom .region_assigner import RegionAssigner\nfrom .sim_ota_assigner import SimOTAAssigner\nfrom .task_aligned_assigner import TaskAlignedAssigner\nfrom .uniform_assigner import UniformAssigner\n\n__all__ = [\n    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',\n    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',\n    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',\n    'TaskAlignedAssigner', 'MaskHungarianAssigner', 'AscendAssignResult',\n    'AscendMaxIoUAssigner'\n]\n"
  },
  {
    "path": "mmdet/core/bbox/assigners/approx_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .max_iou_assigner import MaxIoUAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass ApproxMaxIoUAssigner(MaxIoUAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with an integer indicating the ground truth\n     index. (semi-positive index: gt label (0-based), -1: background)\n\n    - -1: negative sample, no assigned gt\n    - semi-positive integer: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes.\n        ignore_wrt_candidates (bool): Whether to compute the iof between\n            `bboxes` and `gt_bboxes_ignore`, or the contrary.\n        match_low_quality (bool): Whether to allow quality matches. This is\n            usually allowed for RPN and single stage detectors, but not allowed\n            in the second stage.\n        gpu_assign_thr (int): The upper bound of the number of GT for GPU\n            assign. When the number of gt is above this threshold, will assign\n            on CPU device. Negative values mean not assign on CPU.\n    \"\"\"\n\n    def __init__(self,\n                 pos_iou_thr,\n                 neg_iou_thr,\n                 min_pos_iou=.0,\n                 gt_max_assign_all=True,\n                 ignore_iof_thr=-1,\n                 ignore_wrt_candidates=True,\n                 match_low_quality=True,\n                 gpu_assign_thr=-1,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.ignore_iof_thr = ignore_iof_thr\n        self.ignore_wrt_candidates = ignore_wrt_candidates\n        self.gpu_assign_thr = gpu_assign_thr\n        self.match_low_quality = match_low_quality\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self,\n               approxs,\n               squares,\n               approxs_per_octave,\n               gt_bboxes,\n               gt_bboxes_ignore=None,\n               gt_labels=None):\n        \"\"\"Assign gt to approxs.\n\n        This method assign a gt bbox to each group of approxs (bboxes),\n        each group of approxs is represent by a base approx (bbox) and\n        will be assigned with -1, or a semi-positive number.\n        background_label (-1) means negative sample,\n        semi-positive number is the index (0-based) of assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to background_label (-1)\n        2. use the max IoU of each group of approxs to assign\n        2. 
assign proposals whose iou with all gts < neg_iou_thr to background\n        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,\n           assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals (may be more than\n           one) to itself\n\n        Args:\n            approxs (Tensor): Bounding boxes to be assigned,\n                shape(approxs_per_octave*n, 4).\n            squares (Tensor): Base Bounding boxes to be assigned,\n                shape(n, 4).\n            approxs_per_octave (int): number of approxs per octave\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        num_squares = squares.size(0)\n        num_gts = gt_bboxes.size(0)\n\n        if num_squares == 0 or num_gts == 0:\n            # No predictions and/or truth, return empty assignment\n            overlaps = approxs.new(num_gts, num_squares)\n            assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n            return assign_result\n\n        # re-organize anchors by approxs_per_octave x num_squares\n        approxs = torch.transpose(\n            approxs.view(num_squares, approxs_per_octave, 4), 0,\n            1).contiguous().view(-1, 4)\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            num_gts > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = approxs.device\n            approxs = approxs.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n            if gt_labels is not None:\n                gt_labels = gt_labels.cpu()\n        all_overlaps = self.iou_calculator(approxs, gt_bboxes)\n\n        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,\n                                        num_gts).max(dim=0)\n        overlaps = torch.transpose(overlaps, 0, 1)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):\n            if self.ignore_wrt_candidates:\n                ignore_overlaps = self.iou_calculator(\n                    squares, gt_bboxes_ignore, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            else:\n                ignore_overlaps = self.iou_calculator(\n                    gt_bboxes_ignore, squares, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n"
  },
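The distinctive step in `ApproxMaxIoUAssigner.assign` above is that each base square is scored by the best IoU any of its `approxs_per_octave` approx anchors achieves against a gt. The sketch below reproduces only that reduction on toy tensors; the boxes are invented and `bbox_overlaps` from this repo's iou_calculators package is used in place of the full assigner.

```python
import torch
from mmdet.core.bbox.iou_calculators import bbox_overlaps

approxs_per_octave, num_squares = 3, 2
# Approxs arrive flattened as (num_squares * approxs_per_octave, 4),
# grouped per square.
approxs = torch.tensor([
    [0., 0., 10., 10.], [0., 0., 20., 5.], [0., 0., 5., 20.],         # square 0
    [50., 50., 60., 60.], [45., 50., 65., 55.], [50., 45., 55., 65.]  # square 1
])
gt_bboxes = torch.tensor([[0., 0., 12., 12.], [48., 48., 62., 62.]])

# Reorder to (approxs_per_octave * num_squares, 4), as in assign().
approxs = torch.transpose(
    approxs.view(num_squares, approxs_per_octave, 4), 0, 1).reshape(-1, 4)
all_overlaps = bbox_overlaps(approxs, gt_bboxes)

# Best IoU over the octave for every (square, gt) pair.
overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                len(gt_bboxes)).max(dim=0)
overlaps = overlaps.t()  # (num_gts, num_squares), as assign_wrt_overlaps expects
print(overlaps.shape)    # torch.Size([2, 2])
```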
  {
    "path": "mmdet/core/bbox/assigners/ascend_assign_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.utils import util_mixins\n\n\nclass AscendAssignResult(util_mixins.NiceRepr):\n    \"\"\"Stores ascend assignments between predicted and truth boxes.\n\n    Arguments:\n        batch_num_gts (list[int]): the number of truth boxes considered.\n        batch_pos_mask (IntTensor): Positive samples mask in all images.\n        batch_neg_mask (IntTensor): Negative samples mask in all images.\n        batch_max_overlaps (FloatTensor): The max overlaps of all bboxes\n            and ground truth boxes.\n        batch_anchor_gt_indes(None | LongTensor): The assigned truth\n            box index of all anchors.\n        batch_anchor_gt_labels(None | LongTensor): The gt labels\n            of all anchors\n    \"\"\"\n\n    def __init__(self,\n                 batch_num_gts,\n                 batch_pos_mask,\n                 batch_neg_mask,\n                 batch_max_overlaps,\n                 batch_anchor_gt_indes=None,\n                 batch_anchor_gt_labels=None):\n        self.batch_num_gts = batch_num_gts\n        self.batch_pos_mask = batch_pos_mask\n        self.batch_neg_mask = batch_neg_mask\n        self.batch_max_overlaps = batch_max_overlaps\n        self.batch_anchor_gt_indes = batch_anchor_gt_indes\n        self.batch_anchor_gt_labels = batch_anchor_gt_labels\n        # Interface for possible user-defined properties\n        self._extra_properties = {}\n"
  },
  {
    "path": "mmdet/core/bbox/assigners/ascend_max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ....utils import masked_fill\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .ascend_assign_result import AscendAssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass AscendMaxIoUAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, or a semi-positive integer\n    indicating the ground truth index.\n\n    - -1: negative sample, no assigned gt\n    - semi-positive integer: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n            `min_pos_iou` is set to avoid assigning bboxes that have extremely\n            small iou with GT as positive samples. It brings about 0.3 mAP\n            improvements in 1x schedule but does not affect the performance of\n            3x schedule. More comparisons can be found in\n            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes.\n        ignore_wrt_candidates (bool): Whether to compute the iof between\n            `bboxes` and `gt_bboxes_ignore`, or the contrary.\n        match_low_quality (bool): Whether to allow low quality matches. This is\n            usually allowed for RPN and single stage detectors, but not allowed\n            in the second stage. Details are demonstrated in Step 4.\n        gpu_assign_thr (int): The upper bound of the number of GT for GPU\n            assign. When the number of gt is above this threshold, will assign\n            on CPU device. 
Negative values mean not assign on CPU.\n    \"\"\"\n\n    def __init__(self,\n                 pos_iou_thr,\n                 neg_iou_thr,\n                 min_pos_iou=.0,\n                 gt_max_assign_all=True,\n                 ignore_iof_thr=-1,\n                 ignore_wrt_candidates=True,\n                 match_low_quality=True,\n                 gpu_assign_thr=-1,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.ignore_iof_thr = ignore_iof_thr\n        self.ignore_wrt_candidates = ignore_wrt_candidates\n        self.gpu_assign_thr = gpu_assign_thr\n        self.match_low_quality = match_low_quality\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self,\n               batch_bboxes,\n               batch_gt_bboxes,\n               batch_gt_bboxes_ignore=None,\n               batch_gt_labels=None,\n               batch_bboxes_ignore_mask=None,\n               batch_num_gts=None):\n        \"\"\"Assign gt to bboxes.\n\n        Args:\n            batch_bboxes (Tensor): Bounding boxes to be assigned,\n                shape(b, n, 4).\n            batch_gt_bboxes (Tensor): Ground truth boxes,\n                shape (b, k, 4).\n            batch_gt_bboxes_ignore (Tensor, optional): Ground truth\n                bboxes that are labelled as `ignored`,\n                e.g., crowd boxes in COCO.\n            batch_gt_labels (Tensor, optional): Label of gt_bboxes,\n                shape (b, k, ).\n            batch_bboxes_ignore_mask: (b, n)\n            batch_num_gts:(b, )\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        batch_overlaps = self.iou_calculator(batch_gt_bboxes, batch_bboxes)\n        batch_overlaps = masked_fill(\n            batch_overlaps,\n            batch_bboxes_ignore_mask.unsqueeze(1).float(),\n            -1,\n            neg=True)\n        if self.ignore_iof_thr > 0 and batch_gt_bboxes_ignore is not None:\n            if self.ignore_wrt_candidates:\n                batch_ignore_overlaps = self.iou_calculator(\n                    batch_bboxes, batch_gt_bboxes_ignore, mode='iof')\n                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,\n                                                    batch_bboxes_ignore_mask,\n                                                    -1)\n                batch_ignore_max_overlaps, _ = batch_ignore_overlaps.max(dim=2)\n            else:\n                batch_ignore_overlaps = self.iou_calculator(\n                    batch_gt_bboxes_ignore, batch_bboxes, mode='iof')\n                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,\n                                                    batch_bboxes_ignore_mask,\n                                                    -1)\n                batch_ignore_max_overlaps, _ = \\\n                    batch_ignore_overlaps.max(dim=1)\n            batch_ignore_mask = \\\n                batch_ignore_max_overlaps > self.ignore_iof_thr\n            batch_overlaps = masked_fill(batch_overlaps, batch_ignore_mask, -1)\n        batch_assign_result = self.batch_assign_wrt_overlaps(\n            batch_overlaps, batch_gt_labels, batch_num_gts)\n        return batch_assign_result\n\n    def batch_assign_wrt_overlaps(self,\n                                  batch_overlaps,\n                                  
batch_gt_labels=None,\n                                  batch_num_gts=None):\n        num_images, num_gts, num_bboxes = batch_overlaps.size()\n        batch_max_overlaps, batch_argmax_overlaps = batch_overlaps.max(dim=1)\n        if isinstance(self.neg_iou_thr, float):\n            batch_neg_mask = \\\n                ((batch_max_overlaps >= 0)\n                 & (batch_max_overlaps < self.neg_iou_thr)).int()\n        elif isinstance(self.neg_iou_thr, tuple):\n            assert len(self.neg_iou_thr) == 2\n            batch_neg_mask = \\\n                ((batch_max_overlaps >= self.neg_iou_thr[0])\n                 & (batch_max_overlaps < self.neg_iou_thr[1])).int()\n        else:\n            batch_neg_mask = torch.zeros(\n                batch_max_overlaps.size(),\n                dtype=torch.int,\n                device=batch_max_overlaps.device)\n        batch_pos_mask = (batch_max_overlaps >= self.pos_iou_thr).int()\n        if self.match_low_quality:\n            batch_gt_max_overlaps, batch_gt_argmax_overlaps = \\\n                batch_overlaps.max(dim=2)\n            batch_index_bool = (batch_gt_max_overlaps >= self.min_pos_iou) & \\\n                               (batch_gt_max_overlaps > 0)\n            if self.gt_max_assign_all:\n                pos_inds_low_quality = \\\n                    (batch_overlaps == batch_gt_max_overlaps.unsqueeze(2)) & \\\n                    batch_index_bool.unsqueeze(2)\n                for i in range(num_gts):\n                    pos_inds_low_quality_gt = pos_inds_low_quality[:, i, :]\n                    batch_argmax_overlaps[pos_inds_low_quality_gt] = i\n                    batch_pos_mask[pos_inds_low_quality_gt] = 1\n            else:\n                index_temp = torch.arange(\n                    0, num_gts, device=batch_max_overlaps.device)\n                for index_image in range(num_images):\n                    gt_argmax_overlaps = batch_gt_argmax_overlaps[index_image]\n                    index_bool = batch_index_bool[index_image]\n                    pos_inds_low_quality = gt_argmax_overlaps[index_bool]\n                    batch_argmax_overlaps[index_image][pos_inds_low_quality] \\\n                        = index_temp[index_bool]\n                    batch_pos_mask[index_image][pos_inds_low_quality] = 1\n        batch_neg_mask = batch_neg_mask * (1 - batch_pos_mask)\n        if batch_gt_labels is not None:\n            batch_anchor_gt_labels = torch.zeros((num_images, num_bboxes),\n                                                 dtype=batch_gt_labels.dtype,\n                                                 device=batch_gt_labels.device)\n            for index_image in range(num_images):\n                batch_anchor_gt_labels[index_image] = torch.index_select(\n                    batch_gt_labels[index_image], 0,\n                    batch_argmax_overlaps[index_image])\n        else:\n            batch_anchor_gt_labels = None\n        return AscendAssignResult(batch_num_gts, batch_pos_mask,\n                                  batch_neg_mask, batch_max_overlaps,\n                                  batch_argmax_overlaps,\n                                  batch_anchor_gt_labels)\n"
  },
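To see what `batch_assign_wrt_overlaps` computes per image, the snippet below reproduces just the positive/negative mask step with plain PyTorch. The IoU values and thresholds are invented and no Ascend device or mmdet import is required; it is only a sketch of the thresholding logic used above.

```python
import torch

pos_iou_thr, neg_iou_thr = 0.7, 0.3
# (num_images, num_gts, num_bboxes) IoU matrix, values made up for the demo.
batch_overlaps = torch.tensor([
    [[0.80, 0.10, 0.45],
     [0.20, 0.05, 0.75]],
    [[0.65, 0.25, 0.00],
     [0.10, 0.90, 0.00]],
])

# For every bbox, the best gt and the corresponding IoU.
batch_max_overlaps, batch_argmax_overlaps = batch_overlaps.max(dim=1)

batch_pos_mask = (batch_max_overlaps >= pos_iou_thr).int()
batch_neg_mask = ((batch_max_overlaps >= 0)
                  & (batch_max_overlaps < neg_iou_thr)).int()
# A bbox that became positive can no longer count as negative.
batch_neg_mask = batch_neg_mask * (1 - batch_pos_mask)

print(batch_pos_mask)  # tensor([[1, 0, 1], [0, 1, 0]], dtype=torch.int32)
print(batch_neg_mask)  # tensor([[0, 1, 0], [0, 0, 1]], dtype=torch.int32)
```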
  {
    "path": "mmdet/core/bbox/assigners/assign_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.utils import util_mixins\n\n\nclass AssignResult(util_mixins.NiceRepr):\n    \"\"\"Stores assignments between predicted and truth boxes.\n\n    Attributes:\n        num_gts (int): the number of truth boxes considered when computing this\n            assignment\n\n        gt_inds (LongTensor): for each predicted box indicates the 1-based\n            index of the assigned truth box. 0 means unassigned and -1 means\n            ignore.\n\n        max_overlaps (FloatTensor): the iou between the predicted box and its\n            assigned truth box.\n\n        labels (None | LongTensor): If specified, for each predicted box\n            indicates the category label of the assigned truth box.\n\n    Example:\n        >>> # An assign result between 4 predicted boxes and 9 true boxes\n        >>> # where only two boxes were assigned.\n        >>> num_gts = 9\n        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])\n        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])\n        >>> labels = torch.LongTensor([0, 3, 4, 0])\n        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)\n        >>> print(str(self))  # xdoctest: +IGNORE_WANT\n        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),\n                      labels.shape=(4,))>\n        >>> # Force addition of gt labels (when adding gt as proposals)\n        >>> new_labels = torch.LongTensor([3, 4, 5])\n        >>> self.add_gt_(new_labels)\n        >>> print(str(self))  # xdoctest: +IGNORE_WANT\n        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),\n                      labels.shape=(7,))>\n    \"\"\"\n\n    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):\n        self.num_gts = num_gts\n        self.gt_inds = gt_inds\n        self.max_overlaps = max_overlaps\n        self.labels = labels\n        # Interface for possible user-defined properties\n        self._extra_properties = {}\n\n    @property\n    def num_preds(self):\n        \"\"\"int: the number of predictions in this assignment\"\"\"\n        return len(self.gt_inds)\n\n    def set_extra_property(self, key, value):\n        \"\"\"Set user-defined new property.\"\"\"\n        assert key not in self.info\n        self._extra_properties[key] = value\n\n    def get_extra_property(self, key):\n        \"\"\"Get user-defined property.\"\"\"\n        return self._extra_properties.get(key, None)\n\n    @property\n    def info(self):\n        \"\"\"dict: a dictionary of info about the object\"\"\"\n        basic_info = {\n            'num_gts': self.num_gts,\n            'num_preds': self.num_preds,\n            'gt_inds': self.gt_inds,\n            'max_overlaps': self.max_overlaps,\n            'labels': self.labels,\n        }\n        basic_info.update(self._extra_properties)\n        return basic_info\n\n    def __nice__(self):\n        \"\"\"str: a \"nice\" summary string describing this assign result\"\"\"\n        parts = []\n        parts.append(f'num_gts={self.num_gts!r}')\n        if self.gt_inds is None:\n            parts.append(f'gt_inds={self.gt_inds!r}')\n        else:\n            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')\n        if self.max_overlaps is None:\n            parts.append(f'max_overlaps={self.max_overlaps!r}')\n        else:\n            parts.append('max_overlaps.shape='\n                         f'{tuple(self.max_overlaps.shape)!r}')\n        if self.labels is None:\n    
        parts.append(f'labels={self.labels!r}')\n        else:\n            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')\n        return ', '.join(parts)\n\n    @classmethod\n    def random(cls, **kwargs):\n        \"\"\"Create random AssignResult for tests or debugging.\n\n        Args:\n            num_preds: number of predicted boxes\n            num_gts: number of true boxes\n            p_ignore (float): probability of a predicted box assigned to an\n                ignored truth\n            p_assigned (float): probability of a predicted box not being\n                assigned\n            p_use_label (float | bool): with labels or not\n            rng (None | int | numpy.random.RandomState): seed or state\n\n        Returns:\n            :obj:`AssignResult`: Randomly generated assign results.\n\n        Example:\n            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA\n            >>> self = AssignResult.random()\n            >>> print(self.info)\n        \"\"\"\n        from mmdet.core.bbox import demodata\n        rng = demodata.ensure_rng(kwargs.get('rng', None))\n\n        num_gts = kwargs.get('num_gts', None)\n        num_preds = kwargs.get('num_preds', None)\n        p_ignore = kwargs.get('p_ignore', 0.3)\n        p_assigned = kwargs.get('p_assigned', 0.7)\n        p_use_label = kwargs.get('p_use_label', 0.5)\n        num_classes = kwargs.get('p_use_label', 3)\n\n        if num_gts is None:\n            num_gts = rng.randint(0, 8)\n        if num_preds is None:\n            num_preds = rng.randint(0, 16)\n\n        if num_gts == 0:\n            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)\n            gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n            if p_use_label is True or p_use_label < rng.rand():\n                labels = torch.zeros(num_preds, dtype=torch.int64)\n            else:\n                labels = None\n        else:\n            import numpy as np\n\n            # Create an overlap for each predicted box\n            max_overlaps = torch.from_numpy(rng.rand(num_preds))\n\n            # Construct gt_inds for each predicted box\n            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)\n            # maximum number of assignments constraints\n            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))\n\n            assigned_idxs = np.where(is_assigned)[0]\n            rng.shuffle(assigned_idxs)\n            assigned_idxs = assigned_idxs[0:n_assigned]\n            assigned_idxs.sort()\n\n            is_assigned[:] = 0\n            is_assigned[assigned_idxs] = True\n\n            is_ignore = torch.from_numpy(\n                rng.rand(num_preds) < p_ignore) & is_assigned\n\n            gt_inds = torch.zeros(num_preds, dtype=torch.int64)\n\n            true_idxs = np.arange(num_gts)\n            rng.shuffle(true_idxs)\n            true_idxs = torch.from_numpy(true_idxs)\n            gt_inds[is_assigned] = true_idxs[:n_assigned].long()\n\n            gt_inds = torch.from_numpy(\n                rng.randint(1, num_gts + 1, size=num_preds))\n            gt_inds[is_ignore] = -1\n            gt_inds[~is_assigned] = 0\n            max_overlaps[~is_assigned] = 0\n\n            if p_use_label is True or p_use_label < rng.rand():\n                if num_classes == 0:\n                    labels = torch.zeros(num_preds, dtype=torch.int64)\n                else:\n                    labels = torch.from_numpy(\n                        # remind that we set FG labels to [0, 
num_class-1]\n                        # since mmdet v2.0\n                        # BG cat_id: num_class\n                        rng.randint(0, num_classes, size=num_preds))\n                    labels[~is_assigned] = 0\n            else:\n                labels = None\n\n        self = cls(num_gts, gt_inds, max_overlaps, labels)\n        return self\n\n    def add_gt_(self, gt_labels):\n        \"\"\"Add ground truth as assigned results.\n\n        Args:\n            gt_labels (torch.Tensor): Labels of gt boxes\n        \"\"\"\n        self_inds = torch.arange(\n            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)\n        self.gt_inds = torch.cat([self_inds, self.gt_inds])\n\n        self.max_overlaps = torch.cat(\n            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])\n\n        if self.labels is not None:\n            self.labels = torch.cat([gt_labels, self.labels])\n"
  },
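`AssignResult` is a plain container, so its behaviour is easiest to see from a tiny hand-built example. The numbers below are made up (two predictions against three gts, assuming mmdet is importable); `add_gt_` prepends one self-assigned entry per gt, mirroring the docstring above.

```python
import torch
from mmdet.core.bbox.assigners import AssignResult

# Prediction 0 is unassigned, prediction 1 is matched to gt 2 (1-based).
result = AssignResult(
    num_gts=3,
    gt_inds=torch.LongTensor([0, 2]),
    max_overlaps=torch.FloatTensor([0.0, 0.8]),
    labels=torch.LongTensor([-1, 5]))

print(result.num_preds)        # 2
print(result.info['gt_inds'])  # tensor([0, 2])

# When gts are added as proposals, each gt is assigned to itself (1-based)
# with overlap 1.0 and its own label.
result.add_gt_(torch.LongTensor([5, 7, 9]))
print(result.gt_inds)       # tensor([1, 2, 3, 0, 2])
print(result.max_overlaps)  # tensor([1.0000, 1.0000, 1.0000, 0.0000, 0.8000])
print(result.labels)        # tensor([ 5,  7,  9, -1,  5])
```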
  {
    "path": "mmdet/core/bbox/assigners/atss_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass ATSSAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `0` or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    If ``alpha`` is not None, it means that the dynamic cost\n    ATSSAssigner is adopted, which is currently only used in the DDOD.\n\n    Args:\n        topk (float): number of bbox selected in each level\n    \"\"\"\n\n    def __init__(self,\n                 topk,\n                 alpha=None,\n                 iou_calculator=dict(type='BboxOverlaps2D'),\n                 ignore_iof_thr=-1):\n        self.topk = topk\n        self.alpha = alpha\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n        self.ignore_iof_thr = ignore_iof_thr\n\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Args:\n        topk (int): number of bbox selected in each level.\n        alpha (float): param of cost rate for each proposal only in DDOD.\n            Default None.\n        iou_calculator (dict): builder of IoU calculator.\n            Default dict(type='BboxOverlaps2D').\n        ignore_iof_thr (int): whether ignore max overlaps or not.\n            Default -1 (1 or -1).\n    \"\"\"\n\n    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py\n    def assign(self,\n               bboxes,\n               num_level_bboxes,\n               gt_bboxes,\n               gt_bboxes_ignore=None,\n               gt_labels=None,\n               cls_scores=None,\n               bbox_preds=None):\n        \"\"\"Assign gt to bboxes.\n\n        The assignment is done in following steps\n\n        1. compute iou between all bbox (bbox of all pyramid levels) and gt\n        2. compute center distance between all bbox and gt\n        3. on each pyramid level, for each gt, select k bbox whose center\n           are closest to the gt center, so we total select k*l bbox as\n           candidates for each gt\n        4. get corresponding iou for the these candidates, and compute the\n           mean and std, set mean + std as the iou threshold\n        5. select these candidates whose iou are greater than or equal to\n           the threshold as positive\n        6. limit the positive sample's center in gt\n\n        If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds`\n        are not None, the overlaps calculation in the first step\n        will also include dynamic cost, which is currently only used in\n        the DDOD.\n\n        Args:\n            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).\n            num_level_bboxes (List): num of bboxes in each level\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO. 
Default None.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n            cls_scores (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * num_classes. Default None.\n            bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4. Default None.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        INF = 100000000\n        bboxes = bboxes[:, :4]\n        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n\n        message = 'Invalid alpha parameter because cls_scores or ' \\\n                  'bbox_preds are None. If you want to use the ' \\\n                  'cost-based ATSSAssigner,  please set cls_scores, ' \\\n                  'bbox_preds and self.alpha at the same time. '\n\n        if self.alpha is None:\n            # ATSSAssigner\n            overlaps = self.iou_calculator(bboxes, gt_bboxes)\n            if cls_scores is not None or bbox_preds is not None:\n                warnings.warn(message)\n        else:\n            # Dynamic cost ATSSAssigner in DDOD\n            assert cls_scores is not None and bbox_preds is not None, message\n\n            # compute cls cost for bbox and GT\n            cls_cost = torch.sigmoid(cls_scores[:, gt_labels])\n\n            # compute iou between all bbox and gt\n            overlaps = self.iou_calculator(bbox_preds, gt_bboxes)\n\n            # make sure that we are in element-wise multiplication\n            assert cls_cost.shape == overlaps.shape\n\n            # overlaps is actually a cost matrix\n            overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha\n\n        # assign 0 by default\n        assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n                                             0,\n                                             dtype=torch.long)\n\n        if num_gt == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_bboxes, ))\n            if num_gt == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = overlaps.new_full((num_bboxes, ),\n                                                    -1,\n                                                    dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        # compute center distance between all bbox and gt\n        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0\n        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0\n        gt_points = torch.stack((gt_cx, gt_cy), dim=1)\n\n        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0\n        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0\n        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)\n\n        distances = (bboxes_points[:, None, :] -\n                     gt_points[None, :, :]).pow(2).sum(-1).sqrt()\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):\n            ignore_overlaps = self.iou_calculator(\n                bboxes, gt_bboxes_ignore, 
mode='iof')\n            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr\n            distances[ignore_idxs, :] = INF\n            assigned_gt_inds[ignore_idxs] = -1\n\n        # Selecting candidates based on the center distance\n        candidate_idxs = []\n        start_idx = 0\n        for level, bboxes_per_level in enumerate(num_level_bboxes):\n            # on each pyramid level, for each gt,\n            # select k bbox whose center are closest to the gt center\n            end_idx = start_idx + bboxes_per_level\n            distances_per_level = distances[start_idx:end_idx, :]\n            selectable_k = min(self.topk, bboxes_per_level)\n\n            _, topk_idxs_per_level = distances_per_level.topk(\n                selectable_k, dim=0, largest=False)\n            candidate_idxs.append(topk_idxs_per_level + start_idx)\n            start_idx = end_idx\n        candidate_idxs = torch.cat(candidate_idxs, dim=0)\n\n        # get corresponding iou for the these candidates, and compute the\n        # mean and std, set mean + std as the iou threshold\n        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]\n        overlaps_mean_per_gt = candidate_overlaps.mean(0)\n        overlaps_std_per_gt = candidate_overlaps.std(0)\n        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt\n\n        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]\n\n        # limit the positive sample's center in gt\n        for gt_idx in range(num_gt):\n            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n        ep_bboxes_cx = bboxes_cx.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        ep_bboxes_cy = bboxes_cy.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        candidate_idxs = candidate_idxs.view(-1)\n\n        # calculate the left, top, right, bottom distance between positive\n        # bbox center and gt side\n        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)\n        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)\n        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n\n        is_pos = is_pos & is_in_gts\n\n        # if an anchor box is assigned to multiple gts,\n        # the one with the highest IoU will be selected.\n        overlaps_inf = torch.full_like(overlaps,\n                                       -INF).t().contiguous().view(-1)\n        index = candidate_idxs.view(-1)[is_pos.view(-1)]\n        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n        overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n        assigned_gt_inds[\n            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n        return AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, 
labels=assigned_labels)\n"
  },
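The distinctive part of ATSS (steps 4-5 in the docstring above) is the adaptive IoU threshold: for each gt, take the k distance-closest anchors on each level as candidates, then keep those whose IoU is at least mean + std of the candidate IoUs. Below is a toy sketch of that thresholding for a single gt, with invented IoU values and plain PyTorch only.

```python
import torch

# IoUs between one gt and its k*l distance-based candidates (invented values).
candidate_overlaps = torch.tensor([0.05, 0.10, 0.42, 0.58, 0.60, 0.12])

iou_mean = candidate_overlaps.mean()
iou_std = candidate_overlaps.std()   # tensor.std() is unbiased by default
iou_thr = iou_mean + iou_std

is_pos = candidate_overlaps >= iou_thr
print(iou_thr)  # tensor(0.5634)
print(is_pos)   # tensor([False, False, False,  True,  True, False])
# In the full assigner, candidates whose centers fall outside the gt box
# are additionally removed (the is_in_gts check), and ties between gts are
# resolved by keeping the highest IoU.
```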
  {
    "path": "mmdet/core/bbox/assigners/base_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseAssigner(metaclass=ABCMeta):\n    \"\"\"Base assigner that assigns boxes to ground truth boxes.\"\"\"\n\n    @abstractmethod\n    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):\n        \"\"\"Assign boxes to either a ground truth boxes or a negative boxes.\"\"\"\n"
  },
  {
    "path": "mmdet/core/bbox/assigners/center_region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef scale_boxes(bboxes, scale):\n    \"\"\"Expand an array of boxes by a given scale.\n\n    Args:\n        bboxes (Tensor): Shape (m, 4)\n        scale (float): The scale factor of bboxes\n\n    Returns:\n        (Tensor): Shape (m, 4). Scaled bboxes\n    \"\"\"\n    assert bboxes.size(1) == 4\n    w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5\n    h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5\n    x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5\n    y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5\n\n    w_half *= scale\n    h_half *= scale\n\n    boxes_scaled = torch.zeros_like(bboxes)\n    boxes_scaled[:, 0] = x_c - w_half\n    boxes_scaled[:, 2] = x_c + w_half\n    boxes_scaled[:, 1] = y_c - h_half\n    boxes_scaled[:, 3] = y_c + h_half\n    return boxes_scaled\n\n\ndef is_located_in(points, bboxes):\n    \"\"\"Are points located in bboxes.\n\n    Args:\n      points (Tensor): Points, shape: (m, 2).\n      bboxes (Tensor): Bounding boxes, shape: (n, 4).\n\n    Return:\n      Tensor: Flags indicating if points are located in bboxes, shape: (m, n).\n    \"\"\"\n    assert points.size(1) == 2\n    assert bboxes.size(1) == 4\n    return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \\\n           (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \\\n           (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \\\n           (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))\n\n\ndef bboxes_area(bboxes):\n    \"\"\"Compute the area of an array of bboxes.\n\n    Args:\n        bboxes (Tensor): The coordinates ox bboxes. Shape: (m, 4)\n\n    Returns:\n        Tensor: Area of the bboxes. Shape: (m, )\n    \"\"\"\n    assert bboxes.size(1) == 4\n    w = (bboxes[:, 2] - bboxes[:, 0])\n    h = (bboxes[:, 3] - bboxes[:, 1])\n    areas = w * h\n    return areas\n\n\n@BBOX_ASSIGNERS.register_module()\nclass CenterRegionAssigner(BaseAssigner):\n    \"\"\"Assign pixels at the center region of a bbox as positive.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n    - -1: negative samples\n    - semi-positive numbers: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_scale (float): Threshold within which pixels are\n          labelled as positive.\n        neg_scale (float): Threshold above which pixels are\n          labelled as positive.\n        min_pos_iof (float): Minimum iof of a pixel with a gt to be\n          labelled as positive. Default: 1e-2\n        ignore_gt_scale (float): Threshold within which the pixels\n          are ignored when the gt is labelled as shadowed. Default: 0.5\n        foreground_dominate (bool): If True, the bbox will be assigned as\n          positive when a gt's kernel region overlaps with another's shadowed\n          (ignored) region, otherwise it is set as ignored. 
Default to False.\n    \"\"\"\n\n    def __init__(self,\n                 pos_scale,\n                 neg_scale,\n                 min_pos_iof=1e-2,\n                 ignore_gt_scale=0.5,\n                 foreground_dominate=False,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.pos_scale = pos_scale\n        self.neg_scale = neg_scale\n        self.min_pos_iof = min_pos_iof\n        self.ignore_gt_scale = ignore_gt_scale\n        self.foreground_dominate = foreground_dominate\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def get_gt_priorities(self, gt_bboxes):\n        \"\"\"Get gt priorities according to their areas.\n\n        Smaller gt has higher priority.\n\n        Args:\n            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).\n\n        Returns:\n            Tensor: The priority of gts so that gts with larger priority is \\\n              more likely to be assigned. Shape (k, )\n        \"\"\"\n        gt_areas = bboxes_area(gt_bboxes)\n        # Rank all gt bbox areas. Smaller objects has larger priority\n        _, sort_idx = gt_areas.sort(descending=True)\n        sort_idx = sort_idx.argsort()\n        return sort_idx\n\n    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):\n        \"\"\"Assign gt to bboxes.\n\n        This method assigns gts to every bbox (proposal/anchor), each bbox \\\n        will be assigned with -1, or a semi-positive number. -1 means \\\n        negative sample, semi-positive number is the index (0-based) of \\\n        assigned gt.\n\n        Args:\n            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are\n              labelled as `ignored`, e.g., crowd boxes in COCO.\n            gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).\n\n        Returns:\n            :obj:`AssignResult`: The assigned result. Note that \\\n              shadowed_labels of shape (N, 2) is also added as an \\\n              `assign_result` attribute. `shadowed_labels` is a tensor \\\n              composed of N pairs of anchor_ind, class_label], where N \\\n              is the number of anchors that lie in the outer region of a \\\n              gt, anchor_ind is the shadowed anchor index and class_label \\\n              is the shadowed class label.\n\n        Example:\n            >>> self = CenterRegionAssigner(0.2, 0.2)\n            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])\n            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])\n            >>> assign_result = self.assign(bboxes, gt_bboxes)\n            >>> expected_gt_inds = torch.LongTensor([1, 0])\n            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)\n        \"\"\"\n        # There are in total 5 steps in the pixel assignment\n        # 1. Find core (the center region, say inner 0.2)\n        #     and shadow (the relatively ourter part, say inner 0.2-0.5)\n        #     regions of every gt.\n        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions\n        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in\n        #      the image.\n        #    3.1. For overlapping objects, the prior bboxes in gt_core is\n        #           assigned with the object with smallest area\n        # 4. Assign prior bboxes with class label according to its gt id.\n        #    4.1. 
Assign -1 to prior bboxes lying in shadowed gts\n        #    4.2. Assign positive prior boxes with the corresponding label\n        # 5. Find pixels lying in the shadow of an object and assign them with\n        #      background label, but set the loss weight of its corresponding\n        #      gt to zero.\n        assert bboxes.size(1) == 4, 'bboxes must have size of 4'\n        # 1. Find core positive and shadow region of every gt\n        gt_core = scale_boxes(gt_bboxes, self.pos_scale)\n        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)\n\n        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions\n        bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2\n        # The center points lie within the gt boxes\n        is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)\n        # Only calculate bbox and gt_core IoF. This enables small prior bboxes\n        #   to match large gts\n        bbox_and_gt_core_overlaps = self.iou_calculator(\n            bboxes, gt_core, mode='iof')\n        # The center point of effective priors should be within the gt box\n        is_bbox_in_gt_core = is_bbox_in_gt & (\n            bbox_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)\n\n        is_bbox_in_gt_shadow = (\n            self.iou_calculator(bboxes, gt_shadow, mode='iof') >\n            self.min_pos_iof)\n        # Rule out center effective positive pixels\n        is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)\n\n        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n        if num_gts == 0 or num_bboxes == 0:\n            # If no gts exist, assign all pixels to negative\n            assigned_gt_ids = \\\n                is_bbox_in_gt_core.new_zeros((num_bboxes,),\n                                             dtype=torch.long)\n            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))\n        else:\n            # Step 3: assign a one-hot gt id to each pixel, and smaller objects\n            #    have high priority to assign the pixel.\n            sort_idx = self.get_gt_priorities(gt_bboxes)\n            assigned_gt_ids, pixels_in_gt_shadow = \\\n                self.assign_one_hot_gt_indices(is_bbox_in_gt_core,\n                                               is_bbox_in_gt_shadow,\n                                               gt_priority=sort_idx)\n\n        if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:\n            # No ground truth or boxes, return empty assignment\n            gt_bboxes_ignore = scale_boxes(\n                gt_bboxes_ignore, scale=self.ignore_gt_scale)\n            is_bbox_in_ignored_gts = is_located_in(bbox_centers,\n                                                   gt_bboxes_ignore)\n            is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)\n            assigned_gt_ids[is_bbox_in_ignored_gts] = -1\n\n        # 4. Assign prior bboxes with class label according to its gt id.\n        assigned_labels = None\n        shadowed_pixel_labels = None\n        if gt_labels is not None:\n            # Default assigned label is the background (-1)\n            assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_ids > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]\n                                                      - 1]\n            # 5. 
Find pixels lying in the shadow of an object\n            shadowed_pixel_labels = pixels_in_gt_shadow.clone()\n            if pixels_in_gt_shadow.numel() > 0:\n                pixel_idx, gt_idx =\\\n                    pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]\n                assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \\\n                    'Some pixels are dually assigned to ignore and gt!'\n                shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]\n                override = (\n                    assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])\n                if self.foreground_dominate:\n                    # When a pixel is both positive and shadowed, set it as pos\n                    shadowed_pixel_labels = shadowed_pixel_labels[~override]\n                else:\n                    # When a pixel is both pos and shadowed, set it as shadowed\n                    assigned_labels[pixel_idx[override]] = -1\n                    assigned_gt_ids[pixel_idx[override]] = 0\n\n        assign_result = AssignResult(\n            num_gts, assigned_gt_ids, None, labels=assigned_labels)\n        # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)\n        assign_result.set_extra_property('shadowed_labels',\n                                         shadowed_pixel_labels)\n        return assign_result\n\n    def assign_one_hot_gt_indices(self,\n                                  is_bbox_in_gt_core,\n                                  is_bbox_in_gt_shadow,\n                                  gt_priority=None):\n        \"\"\"Assign only one gt index to each prior box.\n\n        Gts with large gt_priority are more likely to be assigned.\n\n        Args:\n            is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center\n              is in the core area of a gt (e.g. 0-0.2).\n              Shape: (num_prior, num_gt).\n            is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox\n              center is in the shadowed area of a gt (e.g. 0.2-0.5).\n              Shape: (num_prior, num_gt).\n            gt_priority (Tensor): Priorities of gts. The gt with a higher\n              priority is more likely to be assigned to the bbox when the bbox\n              match with multiple gts. Shape: (num_gt, ).\n\n        Returns:\n            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).\n\n                - assigned_gt_inds: The assigned gt index of each prior bbox \\\n                    (i.e. index from 1 to num_gts). Shape: (num_prior, ).\n                - shadowed_gt_inds: shadowed gt indices. It is a tensor of \\\n                    shape (num_ignore, 2) with first column being the \\\n                    shadowed prior bbox indices and the second column the \\\n                    shadowed gt indices (1-based).\n        \"\"\"\n        num_bboxes, num_gts = is_bbox_in_gt_core.shape\n\n        if gt_priority is None:\n            gt_priority = torch.arange(\n                num_gts, device=is_bbox_in_gt_core.device)\n        assert gt_priority.size(0) == num_gts\n        # The bigger gt_priority, the more preferable to be assigned\n        # The assigned inds are by default 0 (background)\n        assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),\n                                                        dtype=torch.long)\n        # Shadowed bboxes are assigned to be background. 
But the corresponding\n        #   label is ignored during loss calculation, which is done through\n        #   shadowed_gt_inds\n        shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)\n        if is_bbox_in_gt_core.sum() == 0:  # No gt match\n            shadowed_gt_inds[:, 1] += 1  # 1-based. For consistency issue\n            return assigned_gt_inds, shadowed_gt_inds\n\n        # The priority of each prior box and gt pair. If one prior box is\n        #  matched bo multiple gts. Only the pair with the highest priority\n        #  is saved\n        pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),\n                                                    -1,\n                                                    dtype=torch.long)\n\n        # Each bbox could match with multiple gts.\n        # The following codes deal with this situation\n        # Matched  bboxes (to any gt). Shape: (num_pos_anchor, )\n        inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)\n        # The matched gt index of each positive bbox. Length >= num_pos_anchor\n        #   , since one bbox could match multiple gts\n        matched_bbox_gt_inds = torch.nonzero(\n            is_bbox_in_gt_core, as_tuple=False)[:, 1]\n        # Assign priority to each bbox-gt pair.\n        pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]\n        _, argmax_priority = pair_priority[inds_of_match].max(dim=1)\n        assigned_gt_inds[inds_of_match] = argmax_priority + 1  # 1-based\n        # Zero-out the assigned anchor box to filter the shadowed gt indices\n        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0\n        # Concat the shadowed indices due to overlapping with that out side of\n        #   effective scale. shape: (total_num_ignore, 2)\n        shadowed_gt_inds = torch.cat(\n            (shadowed_gt_inds, torch.nonzero(\n                is_bbox_in_gt_core, as_tuple=False)),\n            dim=0)\n        # `is_bbox_in_gt_core` should be changed back to keep arguments intact.\n        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1\n        # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`\n        if shadowed_gt_inds.numel() > 0:\n            shadowed_gt_inds[:, 1] += 1\n        return assigned_gt_inds, shadowed_gt_inds\n"
  },
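The geometry helpers at the top of this file can be exercised directly. In the sketch below (made-up gt box and anchor centers, assuming mmdet is importable), a gt is shrunk to its core (pos_scale) and shadow (neg_scale) regions and a few centers are tested against them.

```python
import torch
from mmdet.core.bbox.assigners.center_region_assigner import (
    is_located_in, scale_boxes)

gt_bboxes = torch.tensor([[0., 0., 100., 100.]])

# Core (inner 20%) and shadow (inner 50%) regions share the gt center (50, 50).
gt_core = scale_boxes(gt_bboxes, 0.2)
gt_shadow = scale_boxes(gt_bboxes, 0.5)
print(gt_core)    # tensor([[40., 40., 60., 60.]])
print(gt_shadow)  # tensor([[25., 25., 75., 75.]])

# One center inside the core, one only inside the shadow, one outside both.
centers = torch.tensor([[50., 55.], [30., 30.], [90., 10.]])
print(is_located_in(centers, gt_core).squeeze(1))    # tensor([ True, False, False])
print(is_located_in(centers, gt_shadow).squeeze(1))  # tensor([ True,  True, False])
```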
  {
    "path": "mmdet/core/bbox/assigners/grid_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass GridAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n\n    - -1: don't care\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n    \"\"\"\n\n    def __init__(self,\n                 pos_iou_thr,\n                 neg_iou_thr,\n                 min_pos_iou=.0,\n                 gt_max_assign_all=True,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):\n        \"\"\"Assign gt to bboxes. The process is very much like the max iou\n        assigner, except that positive samples are constrained within the cell\n        that the gt boxes fell in.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, 0, or a positive number. -1 means don't care,\n        0 means negative sample, positive number is the index (1-based) of\n        assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to -1\n        2. assign proposals whose iou with all gts <= neg_iou_thr to 0\n        3. for each bbox within a cell, if the iou with its nearest gt >\n            pos_iou_thr and the center of that gt falls inside the cell,\n            assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals within the cell the\n            gt bbox falls in to itself.\n\n        Args:\n            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).\n            box_responsible_flags (Tensor): flag to indicate whether box is\n                responsible for prediction, shape(n, )\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)\n\n        # compute iou between all gt and bboxes\n        overlaps = self.iou_calculator(gt_bboxes, bboxes)\n\n        # 1. 
assign -1 by default\n        assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_bboxes, ))\n            if num_gts == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = overlaps.new_full((num_bboxes, ),\n                                                    -1,\n                                                    dtype=torch.long)\n            return AssignResult(\n                num_gts,\n                assigned_gt_inds,\n                max_overlaps,\n                labels=assigned_labels)\n\n        # 2. assign negative: below\n        # for each anchor, which gt best overlaps with it\n        # for each anchor, the max iou of all gts\n        # shape of max_overlaps == argmax_overlaps == num_bboxes\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n\n        if isinstance(self.neg_iou_thr, float):\n            assigned_gt_inds[(max_overlaps >= 0)\n                             & (max_overlaps <= self.neg_iou_thr)] = 0\n        elif isinstance(self.neg_iou_thr, (tuple, list)):\n            assert len(self.neg_iou_thr) == 2\n            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])\n                             & (max_overlaps <= self.neg_iou_thr[1])] = 0\n\n        # 3. assign positive: falls into responsible cell and above\n        # positive IOU threshold, the order matters.\n        # the prior condition of comparison is to filter out all\n        # unrelated anchors, i.e. not box_responsible_flags\n        overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.\n\n        # calculate max_overlaps again, but this time we only consider IOUs\n        # for anchors responsible for prediction\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n\n        # for each gt, which anchor best overlaps with it\n        # for each gt, the max iou of all proposals\n        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts\n        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n        pos_inds = (max_overlaps >\n                    self.pos_iou_thr) & box_responsible_flags.type(torch.bool)\n        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1\n\n        # 4. 
assign positive to max overlapped anchors within responsible cell\n        for i in range(num_gts):\n            if gt_max_overlaps[i] > self.min_pos_iou:\n                if self.gt_max_assign_all:\n                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \\\n                         box_responsible_flags.type(torch.bool)\n                    assigned_gt_inds[max_iou_inds] = i + 1\n                elif box_responsible_flags[gt_argmax_overlaps[i]]:\n                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1\n\n        # assign labels of positive anchors\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n\n        else:\n            assigned_labels = None\n\n        return AssignResult(\n            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n"
  },
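A minimal usage sketch for GridAssigner with toy numbers (two anchors, one gt box, and only the first anchor responsible for its cell), assuming mmdet and its registries are importable; the coordinates and label are made up.

import torch
from mmdet.core.bbox.assigners.grid_assigner import GridAssigner

assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.5)
bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
# Only the first anchor is responsible for predicting in its cell.
box_responsible_flags = torch.tensor([True, False])
gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
gt_labels = torch.LongTensor([2])
result = assigner.assign(bboxes, box_responsible_flags, gt_bboxes, gt_labels)
print(result.gt_inds)  # tensor([1, 0]): 1-based gt index for positives, 0 = negative
print(result.labels)   # tensor([ 2, -1])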
  {
    "path": "mmdet/core/bbox/assigners/hungarian_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom scipy.optimize import linear_sum_assignment\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..match_costs import build_match_cost\nfrom ..transforms import bbox_cxcywh_to_xyxy\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass HungarianAssigner(BaseAssigner):\n    \"\"\"Computes one-to-one matching between predictions and ground truth.\n\n    This class computes an assignment between the targets and the predictions\n    based on the costs. The costs are weighted sum of three components:\n    classification cost, regression L1 cost and regression iou cost. The\n    targets don't include the no_object, so generally there are more\n    predictions than targets. After the one-to-one matching, the un-matched\n    are treated as backgrounds. Thus each query prediction will be assigned\n    with `0` or a positive integer indicating the ground truth index:\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        cls_weight (int | float, optional): The scale factor for classification\n            cost. Default 1.0.\n        bbox_weight (int | float, optional): The scale factor for regression\n            L1 cost. Default 1.0.\n        iou_weight (int | float, optional): The scale factor for regression\n            iou cost. Default 1.0.\n        iou_calculator (dict | optional): The config for the iou calculation.\n            Default type `BboxOverlaps2D`.\n        iou_mode (str | optional): \"iou\" (intersection over union), \"iof\"\n                (intersection over foreground), or \"giou\" (generalized\n                intersection over union). Default \"giou\".\n    \"\"\"\n\n    def __init__(self,\n                 cls_cost=dict(type='ClassificationCost', weight=1.),\n                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),\n                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):\n        self.cls_cost = build_match_cost(cls_cost)\n        self.reg_cost = build_match_cost(reg_cost)\n        self.iou_cost = build_match_cost(iou_cost)\n\n    def assign(self,\n               bbox_pred,\n               cls_pred,\n               gt_bboxes,\n               gt_labels,\n               img_meta,\n               gt_bboxes_ignore=None,\n               eps=1e-7):\n        \"\"\"Computes one-to-one matching based on the weighted costs.\n\n        This method assign each query prediction to a ground truth or\n        background. The `assigned_gt_inds` with -1 means don't care,\n        0 means negative sample, and positive number is the index (1-based)\n        of assigned gt.\n        The assignment is done in the following steps, the order matters.\n\n        1. assign every prediction to -1\n        2. compute the weighted costs\n        3. do Hungarian matching on CPU based on the costs\n        4. assign all to 0 (background) first, then for each matched pair\n           between predictions and gts, treat this prediction as foreground\n           and assign the corresponding gt index (plus 1) to it.\n\n        Args:\n            bbox_pred (Tensor): Predicted boxes with normalized coordinates\n                (cx, cy, w, h), which are all in range [0, 1]. 
Shape\n                [num_query, 4].\n            cls_pred (Tensor): Predicted classification logits, shape\n                [num_query, num_class].\n            gt_bboxes (Tensor): Ground truth boxes with unnormalized\n                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].\n            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n            img_meta (dict): Meta information for current image.\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`. Default None.\n            eps (int | float, optional): A value added to the denominator for\n                numerical stability. Default 1e-7.\n\n        Returns:\n            :obj:`AssignResult`: The assigned result.\n        \"\"\"\n        assert gt_bboxes_ignore is None, \\\n            'Only case when gt_bboxes_ignore is None is supported.'\n        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n        # 1. assign -1 by default\n        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),\n                                              -1,\n                                              dtype=torch.long)\n        assigned_labels = bbox_pred.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            if num_gts == 0:\n                # No ground truth, assign all to background\n                assigned_gt_inds[:] = 0\n            return AssignResult(\n                num_gts, assigned_gt_inds, None, labels=assigned_labels)\n        img_h, img_w, _ = img_meta['img_shape']\n        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,\n                                       img_h]).unsqueeze(0)\n\n        # 2. compute the weighted costs\n        # classification and bboxcost.\n        cls_cost = self.cls_cost(cls_pred, gt_labels)\n        # regression L1 cost\n        normalize_gt_bboxes = gt_bboxes / factor\n        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)\n        # regression iou cost, defaultly giou is used in official DETR.\n        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor\n        iou_cost = self.iou_cost(bboxes, gt_bboxes)\n        # weighted sum of above three costs\n        cost = cls_cost + reg_cost + iou_cost\n\n        # 3. do Hungarian matching on CPU using linear_sum_assignment\n        cost = cost.detach().cpu()\n        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n        matched_row_inds = torch.from_numpy(matched_row_inds).to(\n            bbox_pred.device)\n        matched_col_inds = torch.from_numpy(matched_col_inds).to(\n            bbox_pred.device)\n\n        # 4. assign backgrounds and foregrounds\n        # assign all indices to backgrounds first\n        assigned_gt_inds[:] = 0\n        # assign foregrounds based on matching results\n        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n        return AssignResult(\n            num_gts, assigned_gt_inds, None, labels=assigned_labels)\n"
  },
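As a toy illustration of steps 3-4 above, the sketch below runs scipy's linear_sum_assignment on a hypothetical 3x2 cost matrix and converts the matched pairs into 1-based assigned gt indices, the same way the tail of assign() does; the cost values are made up.

import torch
from scipy.optimize import linear_sum_assignment

cost = torch.tensor([[0.9, 0.1],   # 3 query predictions (rows) x 2 gts (cols)
                     [0.4, 0.8],
                     [0.2, 0.3]])
matched_row_inds, matched_col_inds = linear_sum_assignment(cost.numpy())

assigned_gt_inds = torch.zeros(cost.size(0), dtype=torch.long)  # background = 0
assigned_gt_inds[torch.from_numpy(matched_row_inds)] = \
    torch.from_numpy(matched_col_inds) + 1
print(assigned_gt_inds)  # tensor([2, 0, 1]): rows 0 and 2 matched, row 1 stays background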
  {
    "path": "mmdet/core/bbox/assigners/mask_hungarian_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom scipy.optimize import linear_sum_assignment\n\nfrom mmdet.core.bbox.builder import BBOX_ASSIGNERS\nfrom mmdet.core.bbox.match_costs.builder import build_match_cost\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass MaskHungarianAssigner(BaseAssigner):\n    \"\"\"Computes one-to-one matching between predictions and ground truth for\n    mask.\n\n    This class computes an assignment between the targets and the predictions\n    based on the costs. The costs are weighted sum of three components:\n    classification cost, mask focal cost and mask dice cost. The\n    targets don't include the no_object, so generally there are more\n    predictions than targets. After the one-to-one matching, the un-matched\n    are treated as backgrounds. Thus each query prediction will be assigned\n    with `0` or a positive integer indicating the ground truth index:\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config.\n        mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config.\n        dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config.\n    \"\"\"\n\n    def __init__(self,\n                 cls_cost=dict(type='ClassificationCost', weight=1.0),\n                 mask_cost=dict(\n                     type='FocalLossCost', weight=1.0, binary_input=True),\n                 dice_cost=dict(type='DiceCost', weight=1.0)):\n        self.cls_cost = build_match_cost(cls_cost)\n        self.mask_cost = build_match_cost(mask_cost)\n        self.dice_cost = build_match_cost(dice_cost)\n\n    def assign(self,\n               cls_pred,\n               mask_pred,\n               gt_labels,\n               gt_mask,\n               img_meta,\n               gt_bboxes_ignore=None,\n               eps=1e-7):\n        \"\"\"Computes one-to-one matching based on the weighted costs.\n\n        Args:\n            cls_pred (Tensor | None): Class prediction in shape\n                (num_query, cls_out_channels).\n            mask_pred (Tensor): Mask prediction in shape (num_query, H, W).\n            gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ).\n            gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W).\n            img_meta (dict): Meta information for current image.\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`. Default None.\n            eps (int | float, optional): A value added to the denominator for\n                numerical stability. Default 1e-7.\n\n        Returns:\n            :obj:`AssignResult`: The assigned result.\n        \"\"\"\n        assert gt_bboxes_ignore is None, \\\n            'Only case when gt_bboxes_ignore is None is supported.'\n        # K-Net sometimes passes cls_pred=None to this assigner.\n        # So we should use the shape of mask_pred\n        num_gt, num_query = gt_labels.shape[0], mask_pred.shape[0]\n\n        # 1. 
assign -1 by default\n        assigned_gt_inds = mask_pred.new_full((num_query, ),\n                                              -1,\n                                              dtype=torch.long)\n        assigned_labels = mask_pred.new_full((num_query, ),\n                                             -1,\n                                             dtype=torch.long)\n        if num_gt == 0 or num_query == 0:\n            # No ground truth or boxes, return empty assignment\n            if num_gt == 0:\n                # No ground truth, assign all to background\n                assigned_gt_inds[:] = 0\n            return AssignResult(\n                num_gt, assigned_gt_inds, None, labels=assigned_labels)\n\n        # 2. compute the weighted costs\n        # classification and maskcost.\n        if self.cls_cost.weight != 0 and cls_pred is not None:\n            cls_cost = self.cls_cost(cls_pred, gt_labels)\n        else:\n            cls_cost = 0\n\n        if self.mask_cost.weight != 0:\n            # mask_pred shape = [num_query, h, w]\n            # gt_mask shape = [num_gt, h, w]\n            # mask_cost shape = [num_query, num_gt]\n            mask_cost = self.mask_cost(mask_pred, gt_mask)\n        else:\n            mask_cost = 0\n\n        if self.dice_cost.weight != 0:\n            dice_cost = self.dice_cost(mask_pred, gt_mask)\n        else:\n            dice_cost = 0\n        cost = cls_cost + mask_cost + dice_cost\n\n        # 3. do Hungarian matching on CPU using linear_sum_assignment\n        cost = cost.detach().cpu()\n\n        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)\n        matched_row_inds = torch.from_numpy(matched_row_inds).to(\n            mask_pred.device)\n        matched_col_inds = torch.from_numpy(matched_col_inds).to(\n            mask_pred.device)\n\n        # 4. assign backgrounds and foregrounds\n        # assign all indices to backgrounds first\n        assigned_gt_inds[:] = 0\n        # assign foregrounds based on matching results\n        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1\n        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]\n        return AssignResult(\n            num_gt, assigned_gt_inds, None, labels=assigned_labels)\n"
  },
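A minimal usage sketch for MaskHungarianAssigner with toy shapes taken from the docstring above (num_query=3, num_gt=2, H=W=8), assuming mmdet and its match-cost registry are importable; all values are made up and the default costs are used.

import torch
from mmdet.core.bbox.assigners.mask_hungarian_assigner import MaskHungarianAssigner

assigner = MaskHungarianAssigner()
num_query, num_gt, H, W = 3, 2, 8, 8
cls_pred = torch.randn(num_query, 4)      # logits over 4 classes
mask_pred = torch.randn(num_query, H, W)  # mask logits
gt_labels = torch.LongTensor([0, 3])
gt_mask = torch.zeros(num_gt, H, W)
gt_mask[0, :4, :4] = 1.0                  # toy binary gt masks
gt_mask[1, 4:, 4:] = 1.0
result = assigner.assign(cls_pred, mask_pred, gt_labels, gt_mask, img_meta=None)
print(result.gt_inds)   # exactly num_gt queries get a positive (1-based) index
print(result.labels)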
  {
    "path": "mmdet/core/bbox/assigners/max_iou_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass MaxIoUAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, or a semi-positive integer\n    indicating the ground truth index.\n\n    - -1: negative sample, no assigned gt\n    - semi-positive integer: positive sample, index (0-based) of assigned gt\n\n    Args:\n        pos_iou_thr (float): IoU threshold for positive bboxes.\n        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.\n        min_pos_iou (float): Minimum iou for a bbox to be considered as a\n            positive bbox. Positive samples can have smaller IoU than\n            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).\n            `min_pos_iou` is set to avoid assigning bboxes that have extremely\n            small iou with GT as positive samples. It brings about 0.3 mAP\n            improvements in 1x schedule but does not affect the performance of\n            3x schedule. More comparisons can be found in\n            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.\n        gt_max_assign_all (bool): Whether to assign all bboxes with the same\n            highest overlap with some gt to that gt.\n        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if\n            `gt_bboxes_ignore` is specified). Negative values mean not\n            ignoring any bboxes.\n        ignore_wrt_candidates (bool): Whether to compute the iof between\n            `bboxes` and `gt_bboxes_ignore`, or the contrary.\n        match_low_quality (bool): Whether to allow low quality matches. This is\n            usually allowed for RPN and single stage detectors, but not allowed\n            in the second stage. Details are demonstrated in Step 4.\n        gpu_assign_thr (int): The upper bound of the number of GT for GPU\n            assign. When the number of gt is above this threshold, will assign\n            on CPU device. Negative values mean not assign on CPU.\n    \"\"\"\n\n    def __init__(self,\n                 pos_iou_thr,\n                 neg_iou_thr,\n                 min_pos_iou=.0,\n                 gt_max_assign_all=True,\n                 ignore_iof_thr=-1,\n                 ignore_wrt_candidates=True,\n                 match_low_quality=True,\n                 gpu_assign_thr=-1,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.pos_iou_thr = pos_iou_thr\n        self.neg_iou_thr = neg_iou_thr\n        self.min_pos_iou = min_pos_iou\n        self.gt_max_assign_all = gt_max_assign_all\n        self.ignore_iof_thr = ignore_iof_thr\n        self.ignore_wrt_candidates = ignore_wrt_candidates\n        self.gpu_assign_thr = gpu_assign_thr\n        self.match_low_quality = match_low_quality\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):\n        \"\"\"Assign gt to bboxes.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, or a semi-positive number. 
-1 means negative\n        sample, semi-positive number is the index (0-based) of assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every bbox to the background\n        2. assign proposals whose iou with all gts < neg_iou_thr to 0\n        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,\n           assign it to that bbox\n        4. for each gt bbox, assign its nearest proposals (may be more than\n           one) to itself\n\n        Args:\n            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n\n        Example:\n            >>> self = MaxIoUAssigner(0.5, 0.5)\n            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])\n            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])\n            >>> assign_result = self.assign(bboxes, gt_bboxes)\n            >>> expected_gt_inds = torch.LongTensor([1, 0])\n            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)\n        \"\"\"\n        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (\n            gt_bboxes.shape[0] > self.gpu_assign_thr) else False\n        # compute overlap and assign gt on CPU when number of GT is large\n        if assign_on_cpu:\n            device = bboxes.device\n            bboxes = bboxes.cpu()\n            gt_bboxes = gt_bboxes.cpu()\n            if gt_bboxes_ignore is not None:\n                gt_bboxes_ignore = gt_bboxes_ignore.cpu()\n            if gt_labels is not None:\n                gt_labels = gt_labels.cpu()\n\n        overlaps = self.iou_calculator(gt_bboxes, bboxes)\n\n        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None\n                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):\n            if self.ignore_wrt_candidates:\n                ignore_overlaps = self.iou_calculator(\n                    bboxes, gt_bboxes_ignore, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)\n            else:\n                ignore_overlaps = self.iou_calculator(\n                    gt_bboxes_ignore, bboxes, mode='iof')\n                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)\n            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1\n\n        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)\n        if assign_on_cpu:\n            assign_result.gt_inds = assign_result.gt_inds.to(device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(device)\n            if assign_result.labels is not None:\n                assign_result.labels = assign_result.labels.to(device)\n        return assign_result\n\n    def assign_wrt_overlaps(self, overlaps, gt_labels=None):\n        \"\"\"Assign w.r.t. the overlaps of bboxes with gts.\n\n        Args:\n            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,\n                shape(k, n).\n            gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)\n\n        # 1. 
assign -1 by default\n        assigned_gt_inds = overlaps.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = overlaps.new_zeros((num_bboxes, ))\n            if num_gts == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = overlaps.new_full((num_bboxes, ),\n                                                    -1,\n                                                    dtype=torch.long)\n            return AssignResult(\n                num_gts,\n                assigned_gt_inds,\n                max_overlaps,\n                labels=assigned_labels)\n\n        # for each anchor, which gt best overlaps with it\n        # for each anchor, the max iou of all gts\n        max_overlaps, argmax_overlaps = overlaps.max(dim=0)\n        # for each gt, which anchor best overlaps with it\n        # for each gt, the max iou of all proposals\n        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)\n\n        # 2. assign negative: below\n        # the negative inds are set to be 0\n        if isinstance(self.neg_iou_thr, float):\n            assigned_gt_inds[(max_overlaps >= 0)\n                             & (max_overlaps < self.neg_iou_thr)] = 0\n        elif isinstance(self.neg_iou_thr, tuple):\n            assert len(self.neg_iou_thr) == 2\n            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])\n                             & (max_overlaps < self.neg_iou_thr[1])] = 0\n\n        # 3. assign positive: above positive IoU threshold\n        pos_inds = max_overlaps >= self.pos_iou_thr\n        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1\n\n        if self.match_low_quality:\n            # Low-quality matching will overwrite the assigned_gt_inds assigned\n            # in Step 3. Thus, the assigned gt might not be the best one for\n            # prediction.\n            # For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2,\n            # bbox 1 will be assigned as the best target for bbox A in step 3.\n            # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's\n            # assigned_gt_inds will be overwritten to be bbox 2.\n            # This might be the reason that it is not used in ROI Heads.\n            for i in range(num_gts):\n                if gt_max_overlaps[i] >= self.min_pos_iou:\n                    if self.gt_max_assign_all:\n                        max_iou_inds = overlaps[i, :] == gt_max_overlaps[i]\n                        assigned_gt_inds[max_iou_inds] = i + 1\n                    else:\n                        assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n\n        return AssignResult(\n            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n"
  },
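The class docstring above already shows an end-to-end assign() example; the sketch below instead calls assign_wrt_overlaps() directly on a hypothetical (num_gts, num_bboxes) overlaps matrix to show how step 4 (low-quality matching) rescues a gt whose best anchor is below pos_iou_thr. The IoU values and labels are made up.

import torch
from mmdet.core.bbox.assigners.max_iou_assigner import MaxIoUAssigner

assigner = MaxIoUAssigner(pos_iou_thr=0.7, neg_iou_thr=0.3, min_pos_iou=0.3)
overlaps = torch.tensor([[0.8, 0.2, 0.1],   # gt 0 vs the three bboxes
                         [0.1, 0.5, 0.2]])  # gt 1 vs the three bboxes
result = assigner.assign_wrt_overlaps(overlaps, gt_labels=torch.LongTensor([5, 7]))
print(result.gt_inds)  # tensor([1, 2, 0]): bbox 1 becomes positive for gt 2 only via step 4
print(result.labels)   # tensor([ 5,  7, -1])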
  {
    "path": "mmdet/core/bbox/assigners/point_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass PointAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each point.\n\n    Each proposals will be assigned with `0`, or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n    \"\"\"\n\n    def __init__(self, scale=4, pos_num=3):\n        self.scale = scale\n        self.pos_num = pos_num\n\n    def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):\n        \"\"\"Assign gt to points.\n\n        This method assign a gt bbox to every points set, each points set\n        will be assigned with  the background_label (-1), or a label number.\n        -1 is background, and semi-positive number is the index (0-based) of\n        assigned gt.\n        The assignment is done in following steps, the order matters.\n\n        1. assign every points to the background_label (-1)\n        2. A point is assigned to some gt bbox if\n            (i) the point is within the k closest points to the gt bbox\n            (ii) the distance between this point and the gt is smaller than\n                other gt bboxes\n\n        Args:\n            points (Tensor): points to be assigned, shape(n, 3) while last\n                dimension stands for (x, y, stride).\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n                NOTE: currently unused.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        num_points = points.shape[0]\n        num_gts = gt_bboxes.shape[0]\n\n        if num_gts == 0 or num_points == 0:\n            # If no truth assign everything to the background\n            assigned_gt_inds = points.new_full((num_points, ),\n                                               0,\n                                               dtype=torch.long)\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = points.new_full((num_points, ),\n                                                  -1,\n                                                  dtype=torch.long)\n            return AssignResult(\n                num_gts, assigned_gt_inds, None, labels=assigned_labels)\n\n        points_xy = points[:, :2]\n        points_stride = points[:, 2]\n        points_lvl = torch.log2(\n            points_stride).int()  # [3...,4...,5...,6...,7...]\n        lvl_min, lvl_max = points_lvl.min(), points_lvl.max()\n\n        # assign gt box\n        gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2\n        gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6)\n        scale = self.scale\n        gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) +\n                          torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int()\n        gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max)\n\n        # stores the assigned gt index of each point\n        assigned_gt_inds = points.new_zeros((num_points, ), 
dtype=torch.long)\n        # stores the assigned gt dist (to this point) of each point\n        assigned_gt_dist = points.new_full((num_points, ), float('inf'))\n        points_range = torch.arange(points.shape[0])\n\n        for idx in range(num_gts):\n            gt_lvl = gt_bboxes_lvl[idx]\n            # get the index of points in this level\n            lvl_idx = gt_lvl == points_lvl\n            points_index = points_range[lvl_idx]\n            # get the points in this level\n            lvl_points = points_xy[lvl_idx, :]\n            # get the center point of gt\n            gt_point = gt_bboxes_xy[[idx], :]\n            # get width and height of gt\n            gt_wh = gt_bboxes_wh[[idx], :]\n            # compute the distance between gt center and\n            #   all points in this level\n            points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1)\n            # find the nearest k points to gt center in this level\n            min_dist, min_dist_index = torch.topk(\n                points_gt_dist, self.pos_num, largest=False)\n            # the index of nearest k points to gt center in this level\n            min_dist_points_index = points_index[min_dist_index]\n            # The less_than_recorded_index stores the index\n            #   of min_dist that is less then the assigned_gt_dist. Where\n            #   assigned_gt_dist stores the dist from previous assigned gt\n            #   (if exist) to each point.\n            less_than_recorded_index = min_dist < assigned_gt_dist[\n                min_dist_points_index]\n            # The min_dist_points_index stores the index of points satisfy:\n            #   (1) it is k nearest to current gt center in this level.\n            #   (2) it is closer to current gt center than other gt center.\n            min_dist_points_index = min_dist_points_index[\n                less_than_recorded_index]\n            # assign the result\n            assigned_gt_inds[min_dist_points_index] = idx + 1\n            assigned_gt_dist[min_dist_points_index] = min_dist[\n                less_than_recorded_index]\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_points, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n\n        return AssignResult(\n            num_gts, assigned_gt_inds, None, labels=assigned_labels)\n"
  },
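A minimal usage sketch for PointAssigner with made-up numbers: four stride-8 points (pyramid level log2(8) = 3) and one 32x32 gt box, whose level ((log2(32/4) + log2(32/4)) / 2 = 3) matches the points, so the pos_num closest points on that level become positives. It assumes mmdet is importable.

import torch
from mmdet.core.bbox.assigners.point_assigner import PointAssigner

assigner = PointAssigner(scale=4, pos_num=3)
points = torch.Tensor([[16., 16., 8.],     # (x, y, stride)
                       [24., 16., 8.],
                       [48., 48., 8.],
                       [100., 100., 8.]])
gt_bboxes = torch.Tensor([[0., 0., 32., 32.]])
gt_labels = torch.LongTensor([1])
result = assigner.assign(points, gt_bboxes, gt_labels=gt_labels)
print(result.gt_inds)  # tensor([1, 1, 1, 0]): the 3 closest points are positives
print(result.labels)   # tensor([ 1,  1,  1, -1])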
  {
    "path": "mmdet/core/bbox/assigners/region_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import anchor_inside_flags\nfrom ..builder import BBOX_ASSIGNERS\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\ndef calc_region(bbox, ratio, stride, featmap_size=None):\n    \"\"\"Calculate region of the box defined by the ratio, the ratio is from the\n    center of the box to every edge.\"\"\"\n    # project bbox on the feature\n    f_bbox = bbox / stride\n    x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2])\n    y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3])\n    x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2])\n    y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3])\n    if featmap_size is not None:\n        x1 = x1.clamp(min=0, max=featmap_size[1])\n        y1 = y1.clamp(min=0, max=featmap_size[0])\n        x2 = x2.clamp(min=0, max=featmap_size[1])\n        y2 = y2.clamp(min=0, max=featmap_size[0])\n    return (x1, y1, x2, y2)\n\n\ndef anchor_ctr_inside_region_flags(anchors, stride, region):\n    \"\"\"Get the flag indicate whether anchor centers are inside regions.\"\"\"\n    x1, y1, x2, y2 = region\n    f_anchors = anchors / stride\n    x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5\n    y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5\n    flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2)\n    return flags\n\n\n@BBOX_ASSIGNERS.register_module()\nclass RegionAssigner(BaseAssigner):\n    \"\"\"Assign a corresponding gt bbox or background to each bbox.\n\n    Each proposals will be assigned with `-1`, `0`, or a positive integer\n    indicating the ground truth index.\n\n    - -1: don't care\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        center_ratio: ratio of the region in the center of the bbox to\n            define positive sample.\n        ignore_ratio: ratio of the region to define ignore samples.\n    \"\"\"\n\n    def __init__(self, center_ratio=0.2, ignore_ratio=0.5):\n        self.center_ratio = center_ratio\n        self.ignore_ratio = ignore_ratio\n\n    def assign(self,\n               mlvl_anchors,\n               mlvl_valid_flags,\n               gt_bboxes,\n               img_meta,\n               featmap_sizes,\n               anchor_scale,\n               anchor_strides,\n               gt_bboxes_ignore=None,\n               gt_labels=None,\n               allowed_border=0):\n        \"\"\"Assign gt to anchors.\n\n        This method assign a gt bbox to every bbox (proposal/anchor), each bbox\n        will be assigned with -1, 0, or a positive number. -1 means don't care,\n        0 means negative sample, positive number is the index (1-based) of\n        assigned gt.\n\n        The assignment is done in following steps, and the order matters.\n\n        1. Assign every anchor to 0 (negative)\n        2. (For each gt_bboxes) Compute ignore flags based on ignore_region\n           then assign -1 to anchors w.r.t. ignore flags\n        3. (For each gt_bboxes) Compute pos flags based on center_region then\n           assign gt_bboxes to anchors w.r.t. pos flags\n        4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor\n           level then assign -1 to anchors w.r.t. ignore flags\n        5. 
Assign anchor outside of image to -1\n\n        Args:\n            mlvl_anchors (list[Tensor]): Multi level anchors.\n            mlvl_valid_flags (list[Tensor]): Multi level valid flags.\n            gt_bboxes (Tensor): Ground truth bboxes of image\n            img_meta (dict): Meta info of image.\n            featmap_sizes (list[Tensor]): Feature mapsize each level\n            anchor_scale (int): Scale of the anchor.\n            anchor_strides (list[int]): Stride of the anchor.\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n            allowed_border (int, optional): The border to allow the valid\n                anchor. Defaults to 0.\n\n        Returns:\n            :obj:`AssignResult`: The assign result.\n        \"\"\"\n        if gt_bboxes_ignore is not None:\n            raise NotImplementedError\n\n        num_gts = gt_bboxes.shape[0]\n        num_bboxes = sum(x.shape[0] for x in mlvl_anchors)\n\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = gt_bboxes.new_zeros((num_bboxes, ))\n            assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ),\n                                                   dtype=torch.long)\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = gt_bboxes.new_full((num_bboxes, ),\n                                                     -1,\n                                                     dtype=torch.long)\n            return AssignResult(\n                num_gts,\n                assigned_gt_inds,\n                max_overlaps,\n                labels=assigned_labels)\n\n        num_lvls = len(mlvl_anchors)\n        r1 = (1 - self.center_ratio) / 2\n        r2 = (1 - self.ignore_ratio) / 2\n\n        scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                           (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n        min_anchor_size = scale.new_full(\n            (1, ), float(anchor_scale * anchor_strides[0]))\n        target_lvls = torch.floor(\n            torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n        target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n\n        # 1. assign 0 (negative) by default\n        mlvl_assigned_gt_inds = []\n        mlvl_ignore_flags = []\n        for lvl in range(num_lvls):\n            h, w = featmap_sizes[lvl]\n            assert h * w == mlvl_anchors[lvl].shape[0]\n            assigned_gt_inds = gt_bboxes.new_full((h * w, ),\n                                                  0,\n                                                  dtype=torch.long)\n            ignore_flags = torch.zeros_like(assigned_gt_inds)\n            mlvl_assigned_gt_inds.append(assigned_gt_inds)\n            mlvl_ignore_flags.append(ignore_flags)\n\n        for gt_id in range(num_gts):\n            lvl = target_lvls[gt_id].item()\n            featmap_size = featmap_sizes[lvl]\n            stride = anchor_strides[lvl]\n            anchors = mlvl_anchors[lvl]\n            gt_bbox = gt_bboxes[gt_id, :4]\n\n            # Compute regions\n            ignore_region = calc_region(gt_bbox, r2, stride, featmap_size)\n            ctr_region = calc_region(gt_bbox, r1, stride, featmap_size)\n\n            # 2. 
Assign -1 to ignore flags\n            ignore_flags = anchor_ctr_inside_region_flags(\n                anchors, stride, ignore_region)\n            mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n            # 3. Assign gt_bboxes to pos flags\n            pos_flags = anchor_ctr_inside_region_flags(anchors, stride,\n                                                       ctr_region)\n            mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1\n\n            # 4. Assign -1 to ignore adjacent lvl\n            if lvl > 0:\n                d_lvl = lvl - 1\n                d_anchors = mlvl_anchors[d_lvl]\n                d_featmap_size = featmap_sizes[d_lvl]\n                d_stride = anchor_strides[d_lvl]\n                d_ignore_region = calc_region(gt_bbox, r2, d_stride,\n                                              d_featmap_size)\n                ignore_flags = anchor_ctr_inside_region_flags(\n                    d_anchors, d_stride, d_ignore_region)\n                mlvl_ignore_flags[d_lvl][ignore_flags] = 1\n            if lvl < num_lvls - 1:\n                u_lvl = lvl + 1\n                u_anchors = mlvl_anchors[u_lvl]\n                u_featmap_size = featmap_sizes[u_lvl]\n                u_stride = anchor_strides[u_lvl]\n                u_ignore_region = calc_region(gt_bbox, r2, u_stride,\n                                              u_featmap_size)\n                ignore_flags = anchor_ctr_inside_region_flags(\n                    u_anchors, u_stride, u_ignore_region)\n                mlvl_ignore_flags[u_lvl][ignore_flags] = 1\n\n        # 4. (cont.) Assign -1 to ignore adjacent lvl\n        for lvl in range(num_lvls):\n            ignore_flags = mlvl_ignore_flags[lvl]\n            mlvl_assigned_gt_inds[lvl][ignore_flags] = -1\n\n        # 5. Assign -1 to anchors outside of the image\n        flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)\n        flat_anchors = torch.cat(mlvl_anchors)\n        flat_valid_flags = torch.cat(mlvl_valid_flags)\n        assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==\n                flat_valid_flags.shape[0])\n        inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,\n                                           img_meta['img_shape'],\n                                           allowed_border)\n        outside_flags = ~inside_flags\n        flat_assigned_gt_inds[outside_flags] = -1\n\n        if gt_labels is not None:\n            assigned_labels = torch.zeros_like(flat_assigned_gt_inds)\n            # Use the flattened (all-level) assignment to pick positives.\n            pos_flags = flat_assigned_gt_inds > 0\n            assigned_labels[pos_flags] = gt_labels[\n                flat_assigned_gt_inds[pos_flags] - 1]\n        else:\n            assigned_labels = None\n\n        return AssignResult(\n            num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)\n"
  },
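A toy call to the two module-level helpers above, with made-up numbers: shrink a 64x64 gt box to its center region on a stride-8 feature map (center_ratio=0.2 gives r1 = (1 - 0.2) / 2 = 0.4, as in assign()), then test which anchor centers fall inside that region.

import torch
from mmdet.core.bbox.assigners.region_assigner import (
    anchor_ctr_inside_region_flags, calc_region)

gt_bbox = torch.Tensor([0., 0., 64., 64.])
stride = 8
ctr_region = calc_region(gt_bbox, 0.4, stride, featmap_size=(16, 16))
print(ctr_region)  # (tensor(3.), tensor(3.), tensor(5.), tensor(5.)) in feature-map coords

anchors = torch.Tensor([[24., 24., 40., 40.],   # center (32, 32) -> inside
                        [0., 0., 16., 16.]])    # center (8, 8)   -> outside
print(anchor_ctr_inside_region_flags(anchors, stride, ctr_region))
# tensor([ True, False])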
  {
    "path": "mmdet/core/bbox/assigners/sim_ota_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import bbox_overlaps\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass SimOTAAssigner(BaseAssigner):\n    \"\"\"Computes matching between predictions and ground truth.\n\n    Args:\n        center_radius (int | float, optional): Ground truth center size\n            to judge whether a prior is in center. Default 2.5.\n        candidate_topk (int, optional): The candidate top-k which used to\n            get top-k ious to calculate dynamic-k. Default 10.\n        iou_weight (int | float, optional): The scale factor for regression\n            iou cost. Default 3.0.\n        cls_weight (int | float, optional): The scale factor for classification\n            cost. Default 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 center_radius=2.5,\n                 candidate_topk=10,\n                 iou_weight=3.0,\n                 cls_weight=1.0):\n        self.center_radius = center_radius\n        self.candidate_topk = candidate_topk\n        self.iou_weight = iou_weight\n        self.cls_weight = cls_weight\n\n    def assign(self,\n               pred_scores,\n               priors,\n               decoded_bboxes,\n               gt_bboxes,\n               gt_labels,\n               gt_bboxes_ignore=None,\n               eps=1e-7):\n        \"\"\"Assign gt to priors using SimOTA. It will switch to CPU mode when\n        GPU is out of memory.\n        Args:\n            pred_scores (Tensor): Classification scores of one image,\n                a 2D-Tensor with shape [num_priors, num_classes]\n            priors (Tensor): All priors of one image, a 2D-Tensor with shape\n                [num_priors, 4] in [cx, xy, stride_w, stride_y] format.\n            decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape\n                [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.\n            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor\n                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (Tensor): Ground truth labels of one image, a Tensor\n                with shape [num_gts].\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            eps (float): A value added to the denominator for numerical\n                stability. Default 1e-7.\n        Returns:\n            assign_result (obj:`AssignResult`): The assigned result.\n        \"\"\"\n        try:\n            assign_result = self._assign(pred_scores, priors, decoded_bboxes,\n                                         gt_bboxes, gt_labels,\n                                         gt_bboxes_ignore, eps)\n            return assign_result\n        except RuntimeError:\n            origin_device = pred_scores.device\n            warnings.warn('OOM RuntimeError is raised due to the huge memory '\n                          'cost during label assignment. CPU mode is applied '\n                          'in this batch. 
If you want to avoid this issue, '\n                          'try to reduce the batch size or image size.')\n            torch.cuda.empty_cache()\n\n            pred_scores = pred_scores.cpu()\n            priors = priors.cpu()\n            decoded_bboxes = decoded_bboxes.cpu()\n            gt_bboxes = gt_bboxes.cpu().float()\n            gt_labels = gt_labels.cpu()\n\n            assign_result = self._assign(pred_scores, priors, decoded_bboxes,\n                                         gt_bboxes, gt_labels,\n                                         gt_bboxes_ignore, eps)\n            assign_result.gt_inds = assign_result.gt_inds.to(origin_device)\n            assign_result.max_overlaps = assign_result.max_overlaps.to(\n                origin_device)\n            assign_result.labels = assign_result.labels.to(origin_device)\n\n            return assign_result\n\n    def _assign(self,\n                pred_scores,\n                priors,\n                decoded_bboxes,\n                gt_bboxes,\n                gt_labels,\n                gt_bboxes_ignore=None,\n                eps=1e-7):\n        \"\"\"Assign gt to priors using SimOTA.\n        Args:\n            pred_scores (Tensor): Classification scores of one image,\n                a 2D-Tensor with shape [num_priors, num_classes]\n            priors (Tensor): All priors of one image, a 2D-Tensor with shape\n                [num_priors, 4] in [cx, xy, stride_w, stride_y] format.\n            decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape\n                [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.\n            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor\n                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (Tensor): Ground truth labels of one image, a Tensor\n                with shape [num_gts].\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            eps (float): A value added to the denominator for numerical\n                stability. 
Default 1e-7.\n        Returns:\n            :obj:`AssignResult`: The assigned result.\n        \"\"\"\n        INF = 100000.0\n        num_gt = gt_bboxes.size(0)\n        num_bboxes = decoded_bboxes.size(0)\n\n        # assign 0 by default\n        assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ),\n                                                   0,\n                                                   dtype=torch.long)\n        valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info(\n            priors, gt_bboxes)\n        valid_decoded_bbox = decoded_bboxes[valid_mask]\n        valid_pred_scores = pred_scores[valid_mask]\n        num_valid = valid_decoded_bbox.size(0)\n\n        if num_gt == 0 or num_bboxes == 0 or num_valid == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = decoded_bboxes.new_zeros((num_bboxes, ))\n            if num_gt == 0:\n                # No truth, assign everything to background\n                assigned_gt_inds[:] = 0\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = decoded_bboxes.new_full((num_bboxes, ),\n                                                          -1,\n                                                          dtype=torch.long)\n            return AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n        pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes)\n        iou_cost = -torch.log(pairwise_ious + eps)\n\n        gt_onehot_label = (\n            F.one_hot(gt_labels.to(torch.int64),\n                      pred_scores.shape[-1]).float().unsqueeze(0).repeat(\n                          num_valid, 1, 1))\n\n        valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)\n        cls_cost = (\n            F.binary_cross_entropy(\n                valid_pred_scores.to(dtype=torch.float32).sqrt_(),\n                gt_onehot_label,\n                reduction='none',\n            ).sum(-1).to(dtype=valid_pred_scores.dtype))\n\n        cost_matrix = (\n            cls_cost * self.cls_weight + iou_cost * self.iou_weight +\n            (~is_in_boxes_and_center) * INF)\n\n        matched_pred_ious, matched_gt_inds = \\\n            self.dynamic_k_matching(\n                cost_matrix, pairwise_ious, num_gt, valid_mask)\n\n        # convert to AssignResult format\n        assigned_gt_inds[valid_mask] = matched_gt_inds + 1\n        assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n        assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()\n        max_overlaps = assigned_gt_inds.new_full((num_bboxes, ),\n                                                 -INF,\n                                                 dtype=torch.float32)\n        max_overlaps[valid_mask] = matched_pred_ious\n        return AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n\n    def get_in_gt_and_in_center_info(self, priors, gt_bboxes):\n        num_gt = gt_bboxes.size(0)\n\n        repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt)\n        repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt)\n        repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt)\n        repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt)\n\n        # is prior centers in gt bboxes, shape: [n_prior, n_gt]\n        l_ = repeated_x - gt_bboxes[:, 0]\n        t_ = repeated_y - gt_bboxes[:, 
1]\n        r_ = gt_bboxes[:, 2] - repeated_x\n        b_ = gt_bboxes[:, 3] - repeated_y\n\n        deltas = torch.stack([l_, t_, r_, b_], dim=1)\n        is_in_gts = deltas.min(dim=1).values > 0\n        is_in_gts_all = is_in_gts.sum(dim=1) > 0\n\n        # is prior centers in gt centers\n        gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0\n        gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0\n        ct_box_l = gt_cxs - self.center_radius * repeated_stride_x\n        ct_box_t = gt_cys - self.center_radius * repeated_stride_y\n        ct_box_r = gt_cxs + self.center_radius * repeated_stride_x\n        ct_box_b = gt_cys + self.center_radius * repeated_stride_y\n\n        cl_ = repeated_x - ct_box_l\n        ct_ = repeated_y - ct_box_t\n        cr_ = ct_box_r - repeated_x\n        cb_ = ct_box_b - repeated_y\n\n        ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1)\n        is_in_cts = ct_deltas.min(dim=1).values > 0\n        is_in_cts_all = is_in_cts.sum(dim=1) > 0\n\n        # in boxes or in centers, shape: [num_priors]\n        is_in_gts_or_centers = is_in_gts_all | is_in_cts_all\n\n        # both in boxes and centers, shape: [num_fg, num_gt]\n        is_in_boxes_and_centers = (\n            is_in_gts[is_in_gts_or_centers, :]\n            & is_in_cts[is_in_gts_or_centers, :])\n        return is_in_gts_or_centers, is_in_boxes_and_centers\n\n    def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):\n        matching_matrix = torch.zeros_like(cost, dtype=torch.uint8)\n        # select candidate topk ious for dynamic-k calculation\n        candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))\n        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)\n        # calculate dynamic k for each gt\n        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)\n        for gt_idx in range(num_gt):\n            _, pos_idx = torch.topk(\n                cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)\n            matching_matrix[:, gt_idx][pos_idx] = 1\n\n        del topk_ious, dynamic_ks, pos_idx\n\n        prior_match_gt_mask = matching_matrix.sum(1) > 1\n        if prior_match_gt_mask.sum() > 0:\n            cost_min, cost_argmin = torch.min(\n                cost[prior_match_gt_mask, :], dim=1)\n            matching_matrix[prior_match_gt_mask, :] *= 0\n            matching_matrix[prior_match_gt_mask, cost_argmin] = 1\n        # get foreground mask inside box and center prior\n        fg_mask_inboxes = matching_matrix.sum(1) > 0\n        valid_mask[valid_mask.clone()] = fg_mask_inboxes\n\n        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)\n        matched_pred_ious = (matching_matrix *\n                             pairwise_ious).sum(1)[fg_mask_inboxes]\n        return matched_pred_ious, matched_gt_inds\n"
  },
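The dynamic-k rule in dynamic_k_matching() above is easy to check by hand; the sketch below uses a made-up (num_candidates, num_gt) IoU matrix and reproduces just the k computation: each gt's k is the truncated sum of its top candidate IoUs, clamped to at least 1.

import torch

pairwise_ious = torch.tensor([[0.90, 0.10],   # 4 candidate priors x 2 gts
                              [0.80, 0.05],
                              [0.70, 0.30],
                              [0.10, 0.20]])
candidate_topk = min(10, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
print(dynamic_ks)  # tensor([2, 1], dtype=torch.int32): column sums 2.5 and 0.65 -> k = 2 and 1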
  {
    "path": "mmdet/core/bbox/assigners/task_aligned_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\nINF = 100000000\n\n\n@BBOX_ASSIGNERS.register_module()\nclass TaskAlignedAssigner(BaseAssigner):\n    \"\"\"Task aligned assigner used in the paper:\n    `TOOD: Task-aligned One-stage Object Detection.\n    <https://arxiv.org/abs/2108.07755>`_.\n\n    Assign a corresponding gt bbox or background to each predicted bbox.\n    Each bbox will be assigned with `0` or a positive integer\n    indicating the ground truth index.\n\n    - 0: negative sample, no assigned gt\n    - positive integer: positive sample, index (1-based) of assigned gt\n\n    Args:\n        topk (int): number of bbox selected in each level\n        iou_calculator (dict): Config dict for iou calculator.\n            Default: dict(type='BboxOverlaps2D')\n    \"\"\"\n\n    def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):\n        assert topk >= 1\n        self.topk = topk\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self,\n               pred_scores,\n               decode_bboxes,\n               anchors,\n               gt_bboxes,\n               gt_bboxes_ignore=None,\n               gt_labels=None,\n               alpha=1,\n               beta=6):\n        \"\"\"Assign gt to bboxes.\n\n        The assignment is done in following steps\n\n        1. compute alignment metric between all bbox (bbox of all pyramid\n           levels) and gt\n        2. select top-k bbox as candidates for each gt\n        3. limit the positive sample's center in gt (because the anchor-free\n           detector only can predict positive distance)\n\n\n        Args:\n            pred_scores (Tensor): predicted class probability,\n                shape(n, num_classes)\n            decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4)\n            anchors (Tensor): pre-defined anchors, shape(n, 4).\n            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).\n            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are\n                labelled as `ignored`, e.g., crowd boxes in COCO.\n            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).\n\n        Returns:\n            :obj:`TaskAlignedAssignResult`: The assign result.\n        \"\"\"\n        anchors = anchors[:, :4]\n        num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0)\n        # compute alignment metric between all bbox and gt\n        overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach()\n        bbox_scores = pred_scores[:, gt_labels].detach()\n        # assign 0 by default\n        assigned_gt_inds = anchors.new_full((num_bboxes, ),\n                                            0,\n                                            dtype=torch.long)\n        assign_metrics = anchors.new_zeros((num_bboxes, ))\n\n        if num_gt == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            max_overlaps = anchors.new_zeros((num_bboxes, ))\n            if num_gt == 0:\n                # No gt boxes, assign everything to background\n                assigned_gt_inds[:] = 0\n            if gt_labels is None:\n                assigned_labels = None\n            else:\n                assigned_labels = anchors.new_full((num_bboxes, ),\n                                                
   -1,\n                                                   dtype=torch.long)\n            assign_result = AssignResult(\n                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n            assign_result.assign_metrics = assign_metrics\n            return assign_result\n\n        # select top-k bboxes as candidates for each gt\n        alignment_metrics = bbox_scores**alpha * overlaps**beta\n        topk = min(self.topk, alignment_metrics.size(0))\n        _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)\n        candidate_metrics = alignment_metrics[candidate_idxs,\n                                              torch.arange(num_gt)]\n        is_pos = candidate_metrics > 0\n\n        # limit the positive sample's center in gt\n        anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0\n        anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0\n        for gt_idx in range(num_gt):\n            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes\n        ep_anchors_cx = anchors_cx.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        ep_anchors_cy = anchors_cy.view(1, -1).expand(\n            num_gt, num_bboxes).contiguous().view(-1)\n        candidate_idxs = candidate_idxs.view(-1)\n\n        # calculate the left, top, right, bottom distance between positive\n        # bbox center and gt side\n        l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]\n        t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]\n        r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt)\n        b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt)\n        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01\n        is_pos = is_pos & is_in_gts\n\n        # if an anchor box is assigned to multiple gts,\n        # the one with the highest iou will be selected.\n        overlaps_inf = torch.full_like(overlaps,\n                                       -INF).t().contiguous().view(-1)\n        index = candidate_idxs.view(-1)[is_pos.view(-1)]\n        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]\n        overlaps_inf = overlaps_inf.view(num_gt, -1).t()\n\n        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)\n        assigned_gt_inds[\n            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1\n        assign_metrics[max_overlaps != -INF] = alignment_metrics[\n            max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]]\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n        assign_result = AssignResult(\n            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)\n        assign_result.assign_metrics = assign_metrics\n        return assign_result\n"
  },
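# Illustrative sketch (not part of the repository): the heart of TaskAlignedAssigner
# is the alignment metric t = score**alpha * IoU**beta, from which the top-k
# predictions per gt are taken as positive candidates. The tiny pairwise-IoU helper
# and all tensor values below are assumptions made for this example; the real
# assigner uses the registered BboxOverlaps2D iou_calculator and wraps its output
# in AssignResult.
import torch


def pairwise_iou(boxes1, boxes2):
    """Pairwise IoU between (n, 4) and (k, 4) xyxy boxes -> (n, k)."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = torch.max(boxes1[:, None, :2], boxes2[None, :, :2])
    rb = torch.min(boxes1[:, None, 2:], boxes2[None, :, 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter).clamp(min=1e-6)


torch.manual_seed(0)
decode_bboxes = torch.tensor([[0., 0., 4., 4.], [1., 1., 5., 5.],
                              [6., 6., 9., 9.], [5., 5., 8., 8.],
                              [0., 5., 3., 9.], [2., 2., 7., 7.]])
gt_bboxes = torch.tensor([[0., 0., 4., 4.], [5., 5., 9., 9.]])
gt_labels = torch.tensor([0, 2])
pred_scores = torch.rand(6, 3)                        # (num_bboxes, num_classes)

alpha, beta, topk = 1, 6, 3
overlaps = pairwise_iou(decode_bboxes, gt_bboxes)     # (num_bboxes, num_gt)
bbox_scores = pred_scores[:, gt_labels]               # score of each gt's class
alignment_metrics = bbox_scores**alpha * overlaps**beta
_, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True)
print(candidate_idxs)    # per-gt indices of the k best-aligned predictions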
  {
    "path": "mmdet/core/bbox/assigners/uniform_assigner.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculators import build_iou_calculator\nfrom ..transforms import bbox_xyxy_to_cxcywh\nfrom .assign_result import AssignResult\nfrom .base_assigner import BaseAssigner\n\n\n@BBOX_ASSIGNERS.register_module()\nclass UniformAssigner(BaseAssigner):\n    \"\"\"Uniform Matching between the anchors and gt boxes, which can achieve\n    balance in positive anchors, and gt_bboxes_ignore was not considered for\n    now.\n\n    Args:\n        pos_ignore_thr (float): the threshold to ignore positive anchors\n        neg_ignore_thr (float): the threshold to ignore negative anchors\n        match_times(int): Number of positive anchors for each gt box.\n           Default 4.\n        iou_calculator (dict): iou_calculator config\n    \"\"\"\n\n    def __init__(self,\n                 pos_ignore_thr,\n                 neg_ignore_thr,\n                 match_times=4,\n                 iou_calculator=dict(type='BboxOverlaps2D')):\n        self.match_times = match_times\n        self.pos_ignore_thr = pos_ignore_thr\n        self.neg_ignore_thr = neg_ignore_thr\n        self.iou_calculator = build_iou_calculator(iou_calculator)\n\n    def assign(self,\n               bbox_pred,\n               anchor,\n               gt_bboxes,\n               gt_bboxes_ignore=None,\n               gt_labels=None):\n        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)\n\n        # 1. assign -1 by default\n        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),\n                                              0,\n                                              dtype=torch.long)\n        assigned_labels = bbox_pred.new_full((num_bboxes, ),\n                                             -1,\n                                             dtype=torch.long)\n        if num_gts == 0 or num_bboxes == 0:\n            # No ground truth or boxes, return empty assignment\n            if num_gts == 0:\n                # No ground truth, assign all to background\n                assigned_gt_inds[:] = 0\n            assign_result = AssignResult(\n                num_gts, assigned_gt_inds, None, labels=assigned_labels)\n            assign_result.set_extra_property(\n                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))\n            assign_result.set_extra_property('pos_predicted_boxes',\n                                             bbox_pred.new_empty((0, 4)))\n            assign_result.set_extra_property('target_boxes',\n                                             bbox_pred.new_empty((0, 4)))\n            return assign_result\n\n        # 2. Compute the L1 cost between boxes\n        # Note that we use anchors and predict boxes both\n        cost_bbox = torch.cdist(\n            bbox_xyxy_to_cxcywh(bbox_pred),\n            bbox_xyxy_to_cxcywh(gt_bboxes),\n            p=1)\n        cost_bbox_anchors = torch.cdist(\n            bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)\n\n        # We found that topk function has different results in cpu and\n        # cuda mode. 
In order to ensure consistency with the source code,\n        # we also use cpu mode.\n        # TODO: Check whether the performance of cpu and cuda are the same.\n        C = cost_bbox.cpu()\n        C1 = cost_bbox_anchors.cpu()\n\n        # self.match_times x n\n        index = torch.topk(\n            C,  # c=b,n,x c[i]=n,x\n            k=self.match_times,\n            dim=0,\n            largest=False)[1]\n\n        # self.match_times x n\n        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]\n        # (self.match_times*2) x n\n        indexes = torch.cat((index, index1),\n                            dim=1).reshape(-1).to(bbox_pred.device)\n\n        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)\n        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)\n        pred_max_overlaps, _ = pred_overlaps.max(dim=1)\n        anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)\n\n        # 3. Compute the ignore indexes use gt_bboxes and predict boxes\n        ignore_idx = pred_max_overlaps > self.neg_ignore_thr\n        assigned_gt_inds[ignore_idx] = -1\n\n        # 4. Compute the ignore indexes of positive sample use anchors\n        # and predict boxes\n        pos_gt_index = torch.arange(\n            0, C1.size(1),\n            device=bbox_pred.device).repeat(self.match_times * 2)\n        pos_ious = anchor_overlaps[indexes, pos_gt_index]\n        pos_ignore_idx = pos_ious < self.pos_ignore_thr\n\n        pos_gt_index_with_ignore = pos_gt_index + 1\n        pos_gt_index_with_ignore[pos_ignore_idx] = -1\n        assigned_gt_inds[indexes] = pos_gt_index_with_ignore\n\n        if gt_labels is not None:\n            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)\n            pos_inds = torch.nonzero(\n                assigned_gt_inds > 0, as_tuple=False).squeeze()\n            if pos_inds.numel() > 0:\n                assigned_labels[pos_inds] = gt_labels[\n                    assigned_gt_inds[pos_inds] - 1]\n        else:\n            assigned_labels = None\n\n        assign_result = AssignResult(\n            num_gts,\n            assigned_gt_inds,\n            anchor_max_overlaps,\n            labels=assigned_labels)\n        assign_result.set_extra_property('pos_idx', ~pos_ignore_idx)\n        assign_result.set_extra_property('pos_predicted_boxes',\n                                         bbox_pred[indexes])\n        assign_result.set_extra_property('target_boxes',\n                                         gt_bboxes[pos_gt_index])\n        return assign_result\n"
  },
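# Illustrative sketch (not the registered UniformAssigner): uniform matching takes,
# for every gt box, the `match_times` predicted boxes and the `match_times` anchors
# with the smallest L1 distance in cxcywh space, so every gt receives the same
# number of positives. The xyxy->cxcywh helper and the toy boxes are assumptions
# standing in for bbox_xyxy_to_cxcywh and real network outputs.
import torch


def xyxy_to_cxcywh(boxes):
    cx = (boxes[:, 0] + boxes[:, 2]) / 2
    cy = (boxes[:, 1] + boxes[:, 3]) / 2
    w = boxes[:, 2] - boxes[:, 0]
    h = boxes[:, 3] - boxes[:, 1]
    return torch.stack([cx, cy, w, h], dim=-1)


match_times = 2
bbox_pred = torch.tensor([[0., 0., 4., 4.], [1., 1., 5., 5.],
                          [6., 6., 9., 9.], [5., 5., 8., 8.]])
anchor = bbox_pred + 0.5                        # toy anchors near the predictions
gt_bboxes = torch.tensor([[0., 0., 4., 4.], [5., 5., 9., 9.]])

cost_bbox = torch.cdist(
    xyxy_to_cxcywh(bbox_pred), xyxy_to_cxcywh(gt_bboxes), p=1)
cost_anchor = torch.cdist(
    xyxy_to_cxcywh(anchor), xyxy_to_cxcywh(gt_bboxes), p=1)

# (match_times, num_gt): indices of the cheapest predictions / anchors per gt
pred_idx = torch.topk(cost_bbox, k=match_times, dim=0, largest=False)[1]
anchor_idx = torch.topk(cost_anchor, k=match_times, dim=0, largest=False)[1]
print(pred_idx, anchor_idx)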
  {
    "path": "mmdet/core/bbox/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nBBOX_ASSIGNERS = Registry('bbox_assigner')\nBBOX_SAMPLERS = Registry('bbox_sampler')\nBBOX_CODERS = Registry('bbox_coder')\n\n\ndef build_assigner(cfg, **default_args):\n    \"\"\"Builder of box assigner.\"\"\"\n    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)\n\n\ndef build_sampler(cfg, **default_args):\n    \"\"\"Builder of box sampler.\"\"\"\n    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)\n\n\ndef build_bbox_coder(cfg, **default_args):\n    \"\"\"Builder of box coder.\"\"\"\n    return build_from_cfg(cfg, BBOX_CODERS, default_args)\n"
  },
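# Illustrative usage sketch (assumes mmdet is installed; importing mmdet.core.bbox
# populates the registries as a side effect): any assigner, sampler or coder can
# then be built from a plain config dict. The topk value is only an example setting.
from mmdet.core.bbox.builder import build_assigner

assigner = build_assigner(dict(type='TaskAlignedAssigner', topk=13))
print(type(assigner).__name__)   # TaskAlignedAssigner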
  {
    "path": "mmdet/core/bbox/coder/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_bbox_coder import BaseBBoxCoder\nfrom .bucketing_bbox_coder import BucketingBBoxCoder\nfrom .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder\nfrom .distance_point_bbox_coder import DistancePointBBoxCoder\nfrom .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder\nfrom .pseudo_bbox_coder import PseudoBBoxCoder\nfrom .tblr_bbox_coder import TBLRBBoxCoder\nfrom .yolo_bbox_coder import YOLOBBoxCoder\n\n__all__ = [\n    'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder',\n    'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder',\n    'BucketingBBoxCoder', 'DistancePointBBoxCoder'\n]\n"
  },
  {
    "path": "mmdet/core/bbox/coder/base_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseBBoxCoder(metaclass=ABCMeta):\n    \"\"\"Base bounding box coder.\"\"\"\n\n    def __init__(self, **kwargs):\n        pass\n\n    @abstractmethod\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Encode deltas between bboxes and ground truth boxes.\"\"\"\n\n    @abstractmethod\n    def decode(self, bboxes, bboxes_pred):\n        \"\"\"Decode the predicted bboxes according to prediction and base\n        boxes.\"\"\"\n"
  },
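# Illustrative sketch: a concrete coder only needs to implement encode/decode on
# top of BaseBBoxCoder. OffsetBBoxCoder below is a made-up toy (not part of mmdet)
# that uses the raw coordinate difference as its target, just to show the interface
# shape; real coders additionally register themselves with
# @BBOX_CODERS.register_module().
import torch

from mmdet.core.bbox.coder import BaseBBoxCoder


class OffsetBBoxCoder(BaseBBoxCoder):
    """Toy coder: target = gt_bboxes - bboxes."""

    def encode(self, bboxes, gt_bboxes):
        return gt_bboxes - bboxes

    def decode(self, bboxes, bboxes_pred):
        return bboxes + bboxes_pred


coder = OffsetBBoxCoder()
bboxes = torch.tensor([[0., 0., 10., 10.]])
gt = torch.tensor([[1., 2., 9., 12.]])
assert torch.equal(coder.decode(bboxes, coder.encode(bboxes, gt)), gt)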
  {
    "path": "mmdet/core/bbox/coder/bucketing_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom ..builder import BBOX_CODERS\nfrom ..transforms import bbox_rescale\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass BucketingBBoxCoder(BaseBBoxCoder):\n    \"\"\"Bucketing BBox Coder for Side-Aware Boundary Localization (SABL).\n\n    Boundary Localization with Bucketing and Bucketing Guided Rescoring\n    are implemented here.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        num_buckets (int): Number of buckets.\n        scale_factor (int): Scale factor of proposals to generate buckets.\n        offset_topk (int): Topk buckets are used to generate\n             bucket fine regression targets. Defaults to 2.\n        offset_upperbound (float): Offset upperbound to generate\n             bucket fine regression targets.\n             To avoid too large offset displacements. Defaults to 1.0.\n        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.\n             Defaults to True.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num_buckets,\n                 scale_factor,\n                 offset_topk=2,\n                 offset_upperbound=1.0,\n                 cls_ignore_neighbor=True,\n                 clip_border=True):\n        super(BucketingBBoxCoder, self).__init__()\n        self.num_buckets = num_buckets\n        self.scale_factor = scale_factor\n        self.offset_topk = offset_topk\n        self.offset_upperbound = offset_upperbound\n        self.cls_ignore_neighbor = cls_ignore_neighbor\n        self.clip_border = clip_border\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get bucketing estimation and fine regression targets during\n        training.\n\n        Args:\n            bboxes (torch.Tensor): source boxes, e.g., object proposals.\n            gt_bboxes (torch.Tensor): target of the transformation, e.g.,\n                ground truth boxes.\n\n        Returns:\n           encoded_bboxes(tuple[Tensor]): bucketing estimation\n            and fine regression targets and weights\n        \"\"\"\n\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets,\n                                     self.scale_factor, self.offset_topk,\n                                     self.offset_upperbound,\n                                     self.cls_ignore_neighbor)\n        return encoded_bboxes\n\n    def decode(self, bboxes, pred_bboxes, max_shape=None):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n        Args:\n            boxes (torch.Tensor): Basic boxes.\n            pred_bboxes (torch.Tensor): Predictions for bucketing estimation\n                and fine regression\n            max_shape (tuple[int], optional): Maximum shape of boxes.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: Decoded boxes.\n        \"\"\"\n        assert len(pred_bboxes) == 2\n        cls_preds, offset_preds = pred_bboxes\n        assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size(\n            0) == bboxes.size(0)\n        decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds,\n                                     
self.num_buckets, self.scale_factor,\n                                     max_shape, self.clip_border)\n\n        return decoded_bboxes\n\n\n@mmcv.jit(coderize=True)\ndef generat_buckets(proposals, num_buckets, scale_factor=1.0):\n    \"\"\"Generate buckets w.r.t bucket number and scale factor of proposals.\n\n    Args:\n        proposals (Tensor): Shape (n, 4)\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n\n    Returns:\n        tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets,\n         t_buckets, d_buckets)\n\n            - bucket_w: Width of buckets on x-axis. Shape (n, ).\n            - bucket_h: Height of buckets on y-axis. Shape (n, ).\n            - l_buckets: Left buckets. Shape (n, ceil(side_num/2)).\n            - r_buckets: Right buckets. Shape (n, ceil(side_num/2)).\n            - t_buckets: Top buckets. Shape (n, ceil(side_num/2)).\n            - d_buckets: Down buckets. Shape (n, ceil(side_num/2)).\n    \"\"\"\n    proposals = bbox_rescale(proposals, scale_factor)\n\n    # number of buckets in each side\n    side_num = int(np.ceil(num_buckets / 2.0))\n    pw = proposals[..., 2] - proposals[..., 0]\n    ph = proposals[..., 3] - proposals[..., 1]\n    px1 = proposals[..., 0]\n    py1 = proposals[..., 1]\n    px2 = proposals[..., 2]\n    py2 = proposals[..., 3]\n\n    bucket_w = pw / num_buckets\n    bucket_h = ph / num_buckets\n\n    # left buckets\n    l_buckets = px1[:, None] + (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]\n    # right buckets\n    r_buckets = px2[:, None] - (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_w[:, None]\n    # top buckets\n    t_buckets = py1[:, None] + (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]\n    # down buckets\n    d_buckets = py2[:, None] - (0.5 + torch.arange(\n        0, side_num).to(proposals).float())[None, :] * bucket_h[:, None]\n    return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets\n\n\n@mmcv.jit(coderize=True)\ndef bbox2bucket(proposals,\n                gt,\n                num_buckets,\n                scale_factor,\n                offset_topk=2,\n                offset_upperbound=1.0,\n                cls_ignore_neighbor=True):\n    \"\"\"Generate buckets estimation and fine regression targets.\n\n    Args:\n        proposals (Tensor): Shape (n, 4)\n        gt (Tensor): Shape (n, 4)\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n        offset_topk (int): Topk buckets are used to generate\n             bucket fine regression targets. Defaults to 2.\n        offset_upperbound (float): Offset allowance to generate\n             bucket fine regression targets.\n             To avoid too large offset displacements. Defaults to 1.0.\n        cls_ignore_neighbor (bool): Ignore second nearest bucket or Not.\n             Defaults to True.\n\n    Returns:\n        tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights).\n\n            - offsets: Fine regression targets. \\\n                Shape (n, num_buckets*2).\n            - offsets_weights: Fine regression weights. \\\n                Shape (n, num_buckets*2).\n            - bucket_labels: Bucketing estimation labels. \\\n                Shape (n, num_buckets*2).\n            - cls_weights: Bucketing estimation weights. 
\\\n                Shape (n, num_buckets*2).\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    # generate buckets\n    proposals = proposals.float()\n    gt = gt.float()\n    (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets,\n     d_buckets) = generat_buckets(proposals, num_buckets, scale_factor)\n\n    gx1 = gt[..., 0]\n    gy1 = gt[..., 1]\n    gx2 = gt[..., 2]\n    gy2 = gt[..., 3]\n\n    # generate offset targets and weights\n    # offsets from buckets to gts\n    l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None]\n    r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None]\n    t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None]\n    d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None]\n\n    # select top-k nearest buckets\n    l_topk, l_label = l_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    r_topk, r_label = r_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    t_topk, t_label = t_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n    d_topk, d_label = d_offsets.abs().topk(\n        offset_topk, dim=1, largest=False, sorted=True)\n\n    offset_l_weights = l_offsets.new_zeros(l_offsets.size())\n    offset_r_weights = r_offsets.new_zeros(r_offsets.size())\n    offset_t_weights = t_offsets.new_zeros(t_offsets.size())\n    offset_d_weights = d_offsets.new_zeros(d_offsets.size())\n    inds = torch.arange(0, proposals.size(0)).to(proposals).long()\n\n    # generate offset weights of top-k nearest buckets\n    for k in range(offset_topk):\n        if k >= 1:\n            offset_l_weights[inds, l_label[:,\n                                           k]] = (l_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_r_weights[inds, r_label[:,\n                                           k]] = (r_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_t_weights[inds, t_label[:,\n                                           k]] = (t_topk[:, k] <\n                                                  offset_upperbound).float()\n            offset_d_weights[inds, d_label[:,\n                                           k]] = (d_topk[:, k] <\n                                                  offset_upperbound).float()\n        else:\n            offset_l_weights[inds, l_label[:, k]] = 1.0\n            offset_r_weights[inds, r_label[:, k]] = 1.0\n            offset_t_weights[inds, t_label[:, k]] = 1.0\n            offset_d_weights[inds, d_label[:, k]] = 1.0\n\n    offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1)\n    offsets_weights = torch.cat([\n        offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights\n    ],\n                                dim=-1)\n\n    # generate bucket labels and weight\n    side_num = int(np.ceil(num_buckets / 2.0))\n    labels = torch.stack(\n        [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1)\n\n    batch_size = labels.size(0)\n    bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size,\n                                                              -1).float()\n    bucket_cls_l_weights = (l_offsets.abs() < 1).float()\n    bucket_cls_r_weights = (r_offsets.abs() < 1).float()\n    bucket_cls_t_weights = (t_offsets.abs() < 1).float()\n    bucket_cls_d_weights = (d_offsets.abs() < 1).float()\n    bucket_cls_weights = torch.cat([\n        bucket_cls_l_weights, 
bucket_cls_r_weights, bucket_cls_t_weights,\n        bucket_cls_d_weights\n    ],\n                                   dim=-1)\n    # ignore second nearest buckets for cls if necessary\n    if cls_ignore_neighbor:\n        bucket_cls_weights = (~((bucket_cls_weights == 1) &\n                                (bucket_labels == 0))).float()\n    else:\n        bucket_cls_weights[:] = 1.0\n    return offsets, offsets_weights, bucket_labels, bucket_cls_weights\n\n\n@mmcv.jit(coderize=True)\ndef bucket2bbox(proposals,\n                cls_preds,\n                offset_preds,\n                num_buckets,\n                scale_factor=1.0,\n                max_shape=None,\n                clip_border=True):\n    \"\"\"Apply bucketing estimation (cls preds) and fine regression (offset\n    preds) to generate det bboxes.\n\n    Args:\n        proposals (Tensor): Boxes to be transformed. Shape (n, 4)\n        cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2).\n        offset_preds (Tensor): fine regression. Shape (n, num_buckets*2).\n        num_buckets (int): Number of buckets.\n        scale_factor (float): Scale factor to rescale proposals.\n        max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n\n    Returns:\n        tuple[Tensor]: (bboxes, loc_confidence).\n\n            - bboxes: predicted bboxes. Shape (n, 4)\n            - loc_confidence: localization confidence of predicted bboxes.\n                Shape (n,).\n    \"\"\"\n\n    side_num = int(np.ceil(num_buckets / 2.0))\n    cls_preds = cls_preds.view(-1, side_num)\n    offset_preds = offset_preds.view(-1, side_num)\n\n    scores = F.softmax(cls_preds, dim=1)\n    score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True)\n\n    rescaled_proposals = bbox_rescale(proposals, scale_factor)\n\n    pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0]\n    ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1]\n    px1 = rescaled_proposals[..., 0]\n    py1 = rescaled_proposals[..., 1]\n    px2 = rescaled_proposals[..., 2]\n    py2 = rescaled_proposals[..., 3]\n\n    bucket_w = pw / num_buckets\n    bucket_h = ph / num_buckets\n\n    score_inds_l = score_label[0::4, 0]\n    score_inds_r = score_label[1::4, 0]\n    score_inds_t = score_label[2::4, 0]\n    score_inds_d = score_label[3::4, 0]\n    l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w\n    r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w\n    t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h\n    d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h\n\n    offsets = offset_preds.view(-1, 4, side_num)\n    inds = torch.arange(proposals.size(0)).to(proposals).long()\n    l_offsets = offsets[:, 0, :][inds, score_inds_l]\n    r_offsets = offsets[:, 1, :][inds, score_inds_r]\n    t_offsets = offsets[:, 2, :][inds, score_inds_t]\n    d_offsets = offsets[:, 3, :][inds, score_inds_d]\n\n    x1 = l_buckets - l_offsets * bucket_w\n    x2 = r_buckets - r_offsets * bucket_w\n    y1 = t_buckets - t_offsets * bucket_h\n    y2 = d_buckets - d_offsets * bucket_h\n\n    if clip_border and max_shape is not None:\n        x1 = x1.clamp(min=0, max=max_shape[1] - 1)\n        y1 = y1.clamp(min=0, max=max_shape[0] - 1)\n        x2 = x2.clamp(min=0, max=max_shape[1] - 1)\n        y2 = y2.clamp(min=0, max=max_shape[0] - 1)\n    bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], 
y2[:, None]],\n                       dim=-1)\n\n    # bucketing guided rescoring\n    loc_confidence = score_topk[:, 0]\n    top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1\n    loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float()\n    loc_confidence = loc_confidence.view(-1, 4).mean(dim=1)\n\n    return bboxes, loc_confidence\n"
  },
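# Illustrative sketch (standalone, mirroring generat_buckets but skipping the
# scale_factor rescaling step): SABL splits each proposal side into `num_buckets`
# slices, and each edge keeps ceil(num_buckets / 2) bucket centers spaced one
# bucket width apart. The single proposal below is an arbitrary example.
import numpy as np
import torch

proposal = torch.tensor([[10., 20., 50., 60.]])      # x1, y1, x2, y2
num_buckets = 8
side_num = int(np.ceil(num_buckets / 2.0))           # buckets kept per side -> 4

pw = proposal[:, 2] - proposal[:, 0]                 # width = 40
bucket_w = pw / num_buckets                          # 5.0 px per bucket
offsets = 0.5 + torch.arange(side_num).float()       # 0.5, 1.5, 2.5, 3.5
l_buckets = proposal[:, 0, None] + offsets[None, :] * bucket_w[:, None]
r_buckets = proposal[:, 2, None] - offsets[None, :] * bucket_w[:, None]
print(l_buckets)    # tensor([[12.5000, 17.5000, 22.5000, 27.5000]])
print(r_buckets)    # tensor([[47.5000, 42.5000, 37.5000, 32.5000]])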
  {
    "path": "mmdet/core/bbox/coder/delta_xywh_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass DeltaXYWHBBoxCoder(BaseBBoxCoder):\n    \"\"\"Delta XYWH BBox coder.\n\n    Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,\n    this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and\n    decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).\n\n    Args:\n        target_means (Sequence[float]): Denormalizing means of target for\n            delta coordinates\n        target_stds (Sequence[float]): Denormalizing standard deviation of\n            target for delta coordinates\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n        add_ctr_clamp (bool): Whether to add center clamp, when added, the\n            predicted box is clamped is its center is too far away from\n            the original anchor's center. Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n    \"\"\"\n\n    def __init__(self,\n                 target_means=(0., 0., 0., 0.),\n                 target_stds=(1., 1., 1., 1.),\n                 clip_border=True,\n                 add_ctr_clamp=False,\n                 ctr_clamp=32):\n        super(BaseBBoxCoder, self).__init__()\n        self.means = target_means\n        self.stds = target_stds\n        self.clip_border = clip_border\n        self.add_ctr_clamp = add_ctr_clamp\n        self.ctr_clamp = ctr_clamp\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor): Source boxes, e.g., object proposals.\n            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,\n                ground-truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)\n        return encoded_bboxes\n\n    def decode(self,\n               bboxes,\n               pred_bboxes,\n               max_shape=None,\n               wh_ratio_clip=16 / 1000):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)\n            pred_bboxes (Tensor): Encoded offsets with respect to each roi.\n               Has shape (B, N, num_classes * 4) or (B, N, 4) or\n               (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n               when rois is a grid of anchors.Offset encoding follows [1]_.\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n               Sequence[int]],optional): Maximum bounds for boxes, specifies\n               (H, W, C) or (H, W). 
If bboxes shape is (B, N, 4), then\n               the max_shape should be a Sequence[Sequence[int]]\n               and the length of max_shape should also be B.\n            wh_ratio_clip (float, optional): The allowed ratio between\n                width and height.\n\n        Returns:\n            torch.Tensor: Decoded boxes.\n        \"\"\"\n\n        assert pred_bboxes.size(0) == bboxes.size(0)\n        if pred_bboxes.ndim == 3:\n            assert pred_bboxes.size(1) == bboxes.size(1)\n\n        if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():\n            # single image decode\n            decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means,\n                                        self.stds, max_shape, wh_ratio_clip,\n                                        self.clip_border, self.add_ctr_clamp,\n                                        self.ctr_clamp)\n        else:\n            if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():\n                warnings.warn(\n                    'DeprecationWarning: onnx_delta2bbox is deprecated '\n                    'in the case of batch decoding and non-ONNX, '\n                    'please use “delta2bbox” instead. In order to improve '\n                    'the decoding speed, the batch function will no '\n                    'longer be supported. ')\n            decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means,\n                                             self.stds, max_shape,\n                                             wh_ratio_clip, self.clip_border,\n                                             self.add_ctr_clamp,\n                                             self.ctr_clamp)\n\n        return decoded_bboxes\n\n\n@mmcv.jit(coderize=True)\ndef bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)):\n    \"\"\"Compute deltas of proposals w.r.t. 
gt.\n\n    We usually compute the deltas of x, y, w, h of proposals w.r.t ground\n    truth bboxes to get regression target.\n    This is the inverse function of :func:`delta2bbox`.\n\n    Args:\n        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n        gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n\n    Returns:\n        Tensor: deltas with shape (N, 4), where columns represent dx, dy,\n            dw, dh.\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    proposals = proposals.float()\n    gt = gt.float()\n    px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n    py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n    pw = proposals[..., 2] - proposals[..., 0]\n    ph = proposals[..., 3] - proposals[..., 1]\n\n    gx = (gt[..., 0] + gt[..., 2]) * 0.5\n    gy = (gt[..., 1] + gt[..., 3]) * 0.5\n    gw = gt[..., 2] - gt[..., 0]\n    gh = gt[..., 3] - gt[..., 1]\n\n    dx = (gx - px) / pw\n    dy = (gy - py) / ph\n    dw = torch.log(gw / pw)\n    dh = torch.log(gh / ph)\n    deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n    means = deltas.new_tensor(means).unsqueeze(0)\n    stds = deltas.new_tensor(stds).unsqueeze(0)\n    deltas = deltas.sub_(means).div_(stds)\n\n    return deltas\n\n\n@mmcv.jit(coderize=True)\ndef delta2bbox(rois,\n               deltas,\n               means=(0., 0., 0., 0.),\n               stds=(1., 1., 1., 1.),\n               max_shape=None,\n               wh_ratio_clip=16 / 1000,\n               clip_border=True,\n               add_ctr_clamp=False,\n               ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4).\n        deltas (Tensor): Encoded offsets relative to each roi.\n            Has shape (N, num_classes * 4) or (N, 4). Note\n            N = num_base_anchors * W * H, when rois is a grid of\n            anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (tuple[int, int]): Maximum bounds for boxes, specifies\n           (H, W). Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes. Default\n            16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp. When set to True,\n            the center of the prediction bounding box will be clamped to\n            avoid being too far away from the center of the anchor.\n            Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4\n           represent tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. 
[1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4\n    if num_bboxes == 0:\n        return deltas\n\n    deltas = deltas.reshape(-1, 4)\n\n    means = deltas.new_tensor(means).view(1, -1)\n    stds = deltas.new_tensor(stds).view(1, -1)\n    denorm_deltas = deltas * stds + means\n\n    dxy = denorm_deltas[:, :2]\n    dwh = denorm_deltas[:, 2:]\n\n    # Compute width/height of each roi\n    rois_ = rois.repeat(1, num_classes).reshape(-1, 4)\n    pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5)\n    pwh = (rois_[:, 2:] - rois_[:, :2])\n\n    dxy_wh = pwh * dxy\n\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    if add_ctr_clamp:\n        dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp)\n        dwh = torch.clamp(dwh, max=max_ratio)\n    else:\n        dwh = dwh.clamp(min=-max_ratio, max=max_ratio)\n\n    gxy = pxy + dxy_wh\n    gwh = pwh * dwh.exp()\n    x1y1 = gxy - (gwh * 0.5)\n    x2y2 = gxy + (gwh * 0.5)\n    bboxes = torch.cat([x1y1, x2y2], dim=-1)\n    if clip_border and max_shape is not None:\n        bboxes[..., 0::2].clamp_(min=0, max=max_shape[1])\n        bboxes[..., 1::2].clamp_(min=0, max=max_shape[0])\n    bboxes = bboxes.reshape(num_bboxes, -1)\n    return bboxes\n\n\ndef onnx_delta2bbox(rois,\n                    deltas,\n                    means=(0., 0., 0., 0.),\n                    stds=(1., 1., 1., 1.),\n                    max_shape=None,\n                    wh_ratio_clip=16 / 1000,\n                    clip_border=True,\n                    add_ctr_clamp=False,\n                    ctr_clamp=32):\n    \"\"\"Apply deltas to shift/scale base boxes.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of :func:`bbox2delta`.\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4)\n        deltas (Tensor): Encoded offsets with respect to each roi.\n            Has shape (B, N, num_classes * 4) or (B, N, 4) or\n            (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H\n            when rois is a grid of anchors.Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates.\n            Default (0., 0., 0., 0.).\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates. Default (1., 1., 1., 1.).\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]],optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). 
If rois shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B. Default None.\n        wh_ratio_clip (float): Maximum aspect ratio for boxes.\n            Default 16 / 1000.\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Default True.\n        add_ctr_clamp (bool): Whether to add center clamp, when added, the\n            predicted box is clamped is its center is too far away from\n            the original anchor's center. Only used by YOLOF. Default False.\n        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.\n            Default 32.\n\n    Returns:\n        Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or\n           (N, num_classes * 4) or (N, 4), where 4 represent\n           tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. [1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3))\n        tensor([[0.0000, 0.0000, 1.0000, 1.0000],\n                [0.1409, 0.1409, 2.8591, 2.8591],\n                [0.0000, 0.3161, 4.1945, 0.6839],\n                [5.0000, 5.0000, 5.0000, 5.0000]])\n    \"\"\"\n    means = deltas.new_tensor(means).view(1,\n                                          -1).repeat(1,\n                                                     deltas.size(-1) // 4)\n    stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4)\n    denorm_deltas = deltas * stds + means\n    dx = denorm_deltas[..., 0::4]\n    dy = denorm_deltas[..., 1::4]\n    dw = denorm_deltas[..., 2::4]\n    dh = denorm_deltas[..., 3::4]\n\n    x1, y1 = rois[..., 0], rois[..., 1]\n    x2, y2 = rois[..., 2], rois[..., 3]\n    # Compute center of each roi\n    px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx)\n    py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy)\n    # Compute width/height of each roi\n    pw = (x2 - x1).unsqueeze(-1).expand_as(dw)\n    ph = (y2 - y1).unsqueeze(-1).expand_as(dh)\n\n    dx_width = pw * dx\n    dy_height = ph * dy\n\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    if add_ctr_clamp:\n        dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp)\n        dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp)\n        dw = torch.clamp(dw, max=max_ratio)\n        dh = torch.clamp(dh, max=max_ratio)\n    else:\n        dw = dw.clamp(min=-max_ratio, max=max_ratio)\n        dh = dh.clamp(min=-max_ratio, max=max_ratio)\n    # Use exp(network energy) to enlarge/shrink each roi\n    gw = pw * dw.exp()\n    gh = ph * dh.exp()\n    # Use network energy to shift the center of each roi\n    gx = px + dx_width\n    gy = py + dy_height\n    # Convert center-xy/width/height to top-left, bottom-right\n    x1 = gx - gw * 0.5\n    y1 = gy - gh * 0.5\n    x2 = gx + gw * 0.5\n    y2 = gy + gh * 0.5\n\n    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n\n    if clip_border and max_shape is not None:\n        # clip bboxes 
with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n            bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size())\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = x1.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(x1)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = x1.new_tensor(0)\n        max_xy = torch.cat(\n            [max_shape] * (deltas.size(-1) // 2),\n            dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n"
  },
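# Illustrative usage sketch (assumes mmdet is installed): bbox2delta and delta2bbox
# are exact inverses when no max_shape clipping is applied, so encoding a gt box
# against a proposal and decoding the result reproduces the gt box. The specific
# boxes are arbitrary.
import torch

from mmdet.core.bbox.coder.delta_xywh_bbox_coder import bbox2delta, delta2bbox

proposals = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 25.]])
gt = torch.tensor([[1., 2., 9., 12.], [4., 6., 16., 22.]])

deltas = bbox2delta(proposals, gt)          # (dx, dy, dw, dh) per proposal
restored = delta2bbox(proposals, deltas)    # decode without max_shape clipping
print(torch.allclose(restored, gt, atol=1e-4))   # True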
  {
    "path": "mmdet/core/bbox/coder/distance_point_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_CODERS\nfrom ..transforms import bbox2distance, distance2bbox\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass DistancePointBBoxCoder(BaseBBoxCoder):\n    \"\"\"Distance Point BBox coder.\n\n    This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n    right) and decode it back to the original.\n\n    Args:\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self, clip_border=True):\n        super(BaseBBoxCoder, self).__init__()\n        self.clip_border = clip_border\n\n    def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):\n        \"\"\"Encode bounding box to distances.\n\n        Args:\n            points (Tensor): Shape (N, 2), The format is [x, y].\n            gt_bboxes (Tensor): Shape (N, 4), The format is \"xyxy\"\n            max_dis (float): Upper bound of the distance. Default None.\n            eps (float): a small value to ensure target < max_dis, instead <=.\n                Default 0.1.\n\n        Returns:\n            Tensor: Box transformation deltas. The shape is (N, 4).\n        \"\"\"\n        assert points.size(0) == gt_bboxes.size(0)\n        assert points.size(-1) == 2\n        assert gt_bboxes.size(-1) == 4\n        return bbox2distance(points, gt_bboxes, max_dis, eps)\n\n    def decode(self, points, pred_bboxes, max_shape=None):\n        \"\"\"Decode distance prediction to bounding box.\n\n        Args:\n            points (Tensor): Shape (B, N, 2) or (N, 2).\n            pred_bboxes (Tensor): Distance from the given point to 4\n                boundaries (left, top, right, bottom). Shape (B, N, 4)\n                or (N, 4)\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n                Sequence[int]],optional): Maximum bounds for boxes, specifies\n                (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n                the max_shape should be a Sequence[Sequence[int]],\n                and the length of max_shape should also be B.\n                Default None.\n        Returns:\n            Tensor: Boxes with shape (N, 4) or (B, N, 4)\n        \"\"\"\n        assert points.size(0) == pred_bboxes.size(0)\n        assert points.size(-1) == 2\n        assert pred_bboxes.size(-1) == 4\n        if self.clip_border is False:\n            max_shape = None\n        return distance2bbox(points, pred_bboxes, max_shape)\n"
  },
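# Illustrative usage sketch (assumes mmdet is installed): DistancePointBBoxCoder
# wraps bbox2distance/distance2bbox, so encoding a box against its sampling point
# and decoding again is a round trip. Point and box values are arbitrary.
import torch

from mmdet.core.bbox.coder import DistancePointBBoxCoder

coder = DistancePointBBoxCoder()
points = torch.tensor([[4., 6.], [12., 10.]])                 # (N, 2) as x, y
gt_bboxes = torch.tensor([[0., 0., 8., 10.], [8., 4., 20., 18.]])

distances = coder.encode(points, gt_bboxes)    # (left, top, right, bottom) per point
restored = coder.decode(points, distances)
print(distances)
print(torch.allclose(restored, gt_bboxes))     # True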
  {
    "path": "mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):\n    \"\"\"Legacy Delta XYWH BBox coder used in MMDet V1.x.\n\n    Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2,\n    y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh)\n    back to original bbox (x1, y1, x2, y2).\n\n    Note:\n        The main difference between :class`LegacyDeltaXYWHBBoxCoder` and\n        :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and\n        height calculation. We suggest to only use this coder when testing with\n        MMDet V1.x models.\n\n    References:\n        .. [1] https://arxiv.org/abs/1311.2524\n\n    Args:\n        target_means (Sequence[float]): denormalizing means of target for\n            delta coordinates\n        target_stds (Sequence[float]): denormalizing standard deviation of\n            target for delta coordinates\n    \"\"\"\n\n    def __init__(self,\n                 target_means=(0., 0., 0., 0.),\n                 target_stds=(1., 1., 1., 1.)):\n        super(BaseBBoxCoder, self).__init__()\n        self.means = target_means\n        self.stds = target_stds\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor): source boxes, e.g., object proposals.\n            gt_bboxes (torch.Tensor): target of the transformation, e.g.,\n                ground-truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means,\n                                           self.stds)\n        return encoded_bboxes\n\n    def decode(self,\n               bboxes,\n               pred_bboxes,\n               max_shape=None,\n               wh_ratio_clip=16 / 1000):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            boxes (torch.Tensor): Basic boxes.\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n            max_shape (tuple[int], optional): Maximum shape of boxes.\n                Defaults to None.\n            wh_ratio_clip (float, optional): The allowed ratio between\n                width and height.\n\n        Returns:\n            torch.Tensor: Decoded boxes.\n        \"\"\"\n        assert pred_bboxes.size(0) == bboxes.size(0)\n        decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means,\n                                           self.stds, max_shape, wh_ratio_clip)\n\n        return decoded_bboxes\n\n\n@mmcv.jit(coderize=True)\ndef legacy_bbox2delta(proposals,\n                      gt,\n                      means=(0., 0., 0., 0.),\n                      stds=(1., 1., 1., 1.)):\n    \"\"\"Compute deltas of proposals w.r.t. 
gt in the MMDet V1.x manner.\n\n    We usually compute the deltas of x, y, w, h of proposals w.r.t ground\n    truth bboxes to get regression target.\n    This is the inverse function of `delta2bbox()`\n\n    Args:\n        proposals (Tensor): Boxes to be transformed, shape (N, ..., 4)\n        gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4)\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n\n    Returns:\n        Tensor: deltas with shape (N, 4), where columns represent dx, dy,\n            dw, dh.\n    \"\"\"\n    assert proposals.size() == gt.size()\n\n    proposals = proposals.float()\n    gt = gt.float()\n    px = (proposals[..., 0] + proposals[..., 2]) * 0.5\n    py = (proposals[..., 1] + proposals[..., 3]) * 0.5\n    pw = proposals[..., 2] - proposals[..., 0] + 1.0\n    ph = proposals[..., 3] - proposals[..., 1] + 1.0\n\n    gx = (gt[..., 0] + gt[..., 2]) * 0.5\n    gy = (gt[..., 1] + gt[..., 3]) * 0.5\n    gw = gt[..., 2] - gt[..., 0] + 1.0\n    gh = gt[..., 3] - gt[..., 1] + 1.0\n\n    dx = (gx - px) / pw\n    dy = (gy - py) / ph\n    dw = torch.log(gw / pw)\n    dh = torch.log(gh / ph)\n    deltas = torch.stack([dx, dy, dw, dh], dim=-1)\n\n    means = deltas.new_tensor(means).unsqueeze(0)\n    stds = deltas.new_tensor(stds).unsqueeze(0)\n    deltas = deltas.sub_(means).div_(stds)\n\n    return deltas\n\n\n@mmcv.jit(coderize=True)\ndef legacy_delta2bbox(rois,\n                      deltas,\n                      means=(0., 0., 0., 0.),\n                      stds=(1., 1., 1., 1.),\n                      max_shape=None,\n                      wh_ratio_clip=16 / 1000):\n    \"\"\"Apply deltas to shift/scale base boxes in the MMDet V1.x manner.\n\n    Typically the rois are anchor or proposed bounding boxes and the deltas are\n    network outputs used to shift/scale those boxes.\n    This is the inverse function of `bbox2delta()`\n\n    Args:\n        rois (Tensor): Boxes to be transformed. Has shape (N, 4)\n        deltas (Tensor): Encoded offsets with respect to each roi.\n            Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when\n            rois is a grid of anchors. Offset encoding follows [1]_.\n        means (Sequence[float]): Denormalizing means for delta coordinates\n        stds (Sequence[float]): Denormalizing standard deviation for delta\n            coordinates\n        max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W)\n        wh_ratio_clip (float): Maximum aspect ratio for boxes.\n\n    Returns:\n        Tensor: Boxes with shape (N, 4), where columns represent\n            tl_x, tl_y, br_x, br_y.\n\n    References:\n        .. 
[1] https://arxiv.org/abs/1311.2524\n\n    Example:\n        >>> rois = torch.Tensor([[ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 0.,  0.,  1.,  1.],\n        >>>                      [ 5.,  5.,  5.,  5.]])\n        >>> deltas = torch.Tensor([[  0.,   0.,   0.,   0.],\n        >>>                        [  1.,   1.,   1.,   1.],\n        >>>                        [  0.,   0.,   2.,  -1.],\n        >>>                        [ 0.7, -1.9, -0.5,  0.3]])\n        >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32))\n        tensor([[0.0000, 0.0000, 1.5000, 1.5000],\n                [0.0000, 0.0000, 5.2183, 5.2183],\n                [0.0000, 0.1321, 7.8891, 0.8679],\n                [5.3967, 2.4251, 6.0033, 3.7749]])\n    \"\"\"\n    means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4)\n    stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4)\n    denorm_deltas = deltas * stds + means\n    dx = denorm_deltas[:, 0::4]\n    dy = denorm_deltas[:, 1::4]\n    dw = denorm_deltas[:, 2::4]\n    dh = denorm_deltas[:, 3::4]\n    max_ratio = np.abs(np.log(wh_ratio_clip))\n    dw = dw.clamp(min=-max_ratio, max=max_ratio)\n    dh = dh.clamp(min=-max_ratio, max=max_ratio)\n    # Compute center of each roi\n    px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx)\n    py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy)\n    # Compute width/height of each roi\n    pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw)\n    ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh)\n    # Use exp(network energy) to enlarge/shrink each roi\n    gw = pw * dw.exp()\n    gh = ph * dh.exp()\n    # Use network energy to shift the center of each roi\n    gx = px + pw * dx\n    gy = py + ph * dy\n    # Convert center-xy/width/height to top-left, bottom-right\n\n    # The true legacy box coder should +- 0.5 here.\n    # However, current implementation improves the performance when testing\n    # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP)\n    x1 = gx - gw * 0.5\n    y1 = gy - gh * 0.5\n    x2 = gx + gw * 0.5\n    y2 = gy + gh * 0.5\n    if max_shape is not None:\n        x1 = x1.clamp(min=0, max=max_shape[1] - 1)\n        y1 = y1.clamp(min=0, max=max_shape[0] - 1)\n        x2 = x2.clamp(min=0, max=max_shape[1] - 1)\n        y2 = y2.clamp(min=0, max=max_shape[0] - 1)\n    bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas)\n    return bboxes\n"
  },
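# Illustrative usage sketch (assumes mmdet is installed): the practical difference
# from DeltaXYWHBBoxCoder is the legacy "+ 1" when measuring proposal and gt
# width/height, so the same box pair yields slightly different deltas under the
# two conventions. The boxes below are arbitrary.
import torch

from mmdet.core.bbox.coder.delta_xywh_bbox_coder import bbox2delta
from mmdet.core.bbox.coder.legacy_delta_xywh_bbox_coder import legacy_bbox2delta

proposals = torch.tensor([[0., 0., 9., 9.]])
gt = torch.tensor([[2., 2., 11., 11.]])

print(bbox2delta(proposals, gt))          # widths measured as x2 - x1 (9 px)
print(legacy_bbox2delta(proposals, gt))   # widths measured as x2 - x1 + 1 (10 px)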
  {
    "path": "mmdet/core/bbox/coder/pseudo_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass PseudoBBoxCoder(BaseBBoxCoder):\n    \"\"\"Pseudo bounding box coder.\"\"\"\n\n    def __init__(self, **kwargs):\n        super(BaseBBoxCoder, self).__init__(**kwargs)\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"torch.Tensor: return the given ``bboxes``\"\"\"\n        return gt_bboxes\n\n    def decode(self, bboxes, pred_bboxes):\n        \"\"\"torch.Tensor: return the given ``pred_bboxes``\"\"\"\n        return pred_bboxes\n"
  },
  {
    "path": "mmdet/core/bbox/coder/tblr_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass TBLRBBoxCoder(BaseBBoxCoder):\n    \"\"\"TBLR BBox coder.\n\n    Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_,\n    this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left,\n    right) and decode it back to the original.\n\n    Args:\n        normalizer (list | float): Normalization factor to be\n          divided with when coding the coordinates. If it is a list, it should\n          have length of 4 indicating normalization factor in tblr dims.\n          Otherwise it is a unified float factor for all dims. Default: 4.0\n        clip_border (bool, optional): Whether clip the objects outside the\n            border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self, normalizer=4.0, clip_border=True):\n        super(BaseBBoxCoder, self).__init__()\n        self.normalizer = normalizer\n        self.clip_border = clip_border\n\n    def encode(self, bboxes, gt_bboxes):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left,\n        bottom, right) order.\n\n        Args:\n            bboxes (torch.Tensor): source boxes, e.g., object proposals.\n            gt_bboxes (torch.Tensor): target of the transformation, e.g.,\n                ground truth boxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        encoded_bboxes = bboxes2tblr(\n            bboxes, gt_bboxes, normalizer=self.normalizer)\n        return encoded_bboxes\n\n    def decode(self, bboxes, pred_bboxes, max_shape=None):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4)\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n               (B, N, 4) or (N, 4)\n            max_shape (Sequence[int] or torch.Tensor or Sequence[\n               Sequence[int]],optional): Maximum bounds for boxes, specifies\n               (H, W, C) or (H, W). 
If bboxes shape is (B, N, 4), then\n               the max_shape should be a Sequence[Sequence[int]]\n               and the length of max_shape should also be B.\n\n        Returns:\n            torch.Tensor: Decoded boxes.\n        \"\"\"\n        decoded_bboxes = tblr2bboxes(\n            bboxes,\n            pred_bboxes,\n            normalizer=self.normalizer,\n            max_shape=max_shape,\n            clip_border=self.clip_border)\n\n        return decoded_bboxes\n\n\n@mmcv.jit(coderize=True)\ndef bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):\n    \"\"\"Encode ground truth boxes to tblr coordinate.\n\n    It first convert the gt coordinate to tblr format,\n     (top, bottom, left, right), relative to prior box centers.\n     The tblr coordinate may be normalized by the side length of prior bboxes\n     if `normalize_by_wh` is specified as True, and it is then normalized by\n     the `normalizer` factor.\n\n    Args:\n        priors (Tensor): Prior boxes in point form\n            Shape: (num_proposals,4).\n        gts (Tensor): Coords of ground truth for each prior in point-form\n            Shape: (num_proposals, 4).\n        normalizer (Sequence[float] | float): normalization parameter of\n            encoded boxes. If it is a list, it has to have length = 4.\n            Default: 4.0\n        normalize_by_wh (bool): Whether to normalize tblr coordinate by the\n            side length (wh) of prior bboxes.\n\n    Return:\n        encoded boxes (Tensor), Shape: (num_proposals, 4)\n    \"\"\"\n\n    # dist b/t match center and prior's center\n    if not isinstance(normalizer, float):\n        normalizer = torch.tensor(normalizer, device=priors.device)\n        assert len(normalizer) == 4, 'Normalizer must have length = 4'\n    assert priors.size(0) == gts.size(0)\n    prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2\n    xmin, ymin, xmax, ymax = gts.split(1, dim=1)\n    top = prior_centers[:, 1].unsqueeze(1) - ymin\n    bottom = ymax - prior_centers[:, 1].unsqueeze(1)\n    left = prior_centers[:, 0].unsqueeze(1) - xmin\n    right = xmax - prior_centers[:, 0].unsqueeze(1)\n    loc = torch.cat((top, bottom, left, right), dim=1)\n    if normalize_by_wh:\n        # Normalize tblr by anchor width and height\n        wh = priors[:, 2:4] - priors[:, 0:2]\n        w, h = torch.split(wh, 1, dim=1)\n        loc[:, :2] /= h  # tb is normalized by h\n        loc[:, 2:] /= w  # lr is normalized by w\n    # Normalize tblr by the given normalization factor\n    return loc / normalizer\n\n\n@mmcv.jit(coderize=True)\ndef tblr2bboxes(priors,\n                tblr,\n                normalizer=4.0,\n                normalize_by_wh=True,\n                max_shape=None,\n                clip_border=True):\n    \"\"\"Decode tblr outputs to prediction boxes.\n\n    The process includes 3 steps: 1) De-normalize tblr coordinates by\n    multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the\n    prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert\n    tblr (top, bottom, left, right) pair relative to the center of priors back\n    to (xmin, ymin, xmax, ymax) coordinate.\n\n    Args:\n        priors (Tensor): Prior boxes in point form (x0, y0, x1, y1)\n          Shape: (N,4) or (B, N, 4).\n        tblr (Tensor): Coords of network output in tblr form\n          Shape: (N, 4) or (B, N, 4).\n        normalizer (Sequence[float] | float): Normalization parameter of\n          encoded boxes. 
If it is a list, it represents the normalization factors\n          at tblr dims. If it is a float, it is the unified normalization\n          factor for all dims. Default: 4.0\n        normalize_by_wh (bool): Whether the tblr coordinates have been\n          normalized by the side length (wh) of prior bboxes.\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]], optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B.\n        clip_border (bool, optional): Whether to clip the objects outside the\n            border of the image. Defaults to True.\n\n    Return:\n        decoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4)\n    \"\"\"\n    if not isinstance(normalizer, float):\n        normalizer = torch.tensor(normalizer, device=priors.device)\n        assert len(normalizer) == 4, 'Normalizer must have length = 4'\n    assert priors.size(0) == tblr.size(0)\n    if priors.ndim == 3:\n        assert priors.size(1) == tblr.size(1)\n\n    loc_decode = tblr * normalizer\n    prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2\n    if normalize_by_wh:\n        wh = priors[..., 2:4] - priors[..., 0:2]\n        w, h = torch.split(wh, 1, dim=-1)\n        # Inplace operation with slice would fail when exporting to ONNX\n        th = h * loc_decode[..., :2]  # tb\n        tw = w * loc_decode[..., 2:]  # lr\n        loc_decode = torch.cat([th, tw], dim=-1)\n    # loc_decode.split(1, dim=-1) cannot be exported using onnx\n    top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1)\n    xmin = prior_centers[..., 0].unsqueeze(-1) - left\n    xmax = prior_centers[..., 0].unsqueeze(-1) + right\n    ymin = prior_centers[..., 1].unsqueeze(-1) - top\n    ymax = prior_centers[..., 1].unsqueeze(-1) + bottom\n\n    bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1)\n\n    if clip_border and max_shape is not None:\n        # clip bboxes with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            xmin, ymin, xmax, ymax = dynamic_clip_for_onnx(\n                xmin, ymin, xmax, ymax, max_shape)\n            bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = priors.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(priors)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = priors.new_tensor(0)\n        max_xy = torch.cat([max_shape, max_shape],\n                           dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n"
  },
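The TBLR coder above is registered through mmdet's BBOX_CODERS registry, but the underlying arithmetic is easy to check in isolation. Below is a minimal, self-contained sketch (plain PyTorch, no mmdet imports; the helper names are illustrative only) that reproduces the `bboxes2tblr`/`tblr2bboxes` math for the default `normalize_by_wh=True` case and verifies the encode/decode round trip.

```python
# Standalone sketch of the TBLR encode/decode round trip (illustrative names).
import torch

def encode_tblr(priors, gts, normalizer=4.0):
    centers = (priors[:, :2] + priors[:, 2:]) / 2
    xmin, ymin, xmax, ymax = gts.split(1, dim=1)
    top = centers[:, 1:2] - ymin
    bottom = ymax - centers[:, 1:2]
    left = centers[:, 0:1] - xmin
    right = xmax - centers[:, 0:1]
    loc = torch.cat((top, bottom, left, right), dim=1)
    w, h = (priors[:, 2:] - priors[:, :2]).split(1, dim=1)
    loc = torch.cat((loc[:, :2] / h, loc[:, 2:] / w), dim=1)  # normalize by wh
    return loc / normalizer

def decode_tblr(priors, deltas, normalizer=4.0):
    centers = (priors[:, :2] + priors[:, 2:]) / 2
    w, h = (priors[:, 2:] - priors[:, :2]).split(1, dim=1)
    loc = deltas * normalizer
    loc = torch.cat((loc[:, :2] * h, loc[:, 2:] * w), dim=1)  # undo wh norm
    top, bottom, left, right = loc.split(1, dim=1)
    return torch.cat((centers[:, 0:1] - left, centers[:, 1:2] - top,
                      centers[:, 0:1] + right, centers[:, 1:2] + bottom), dim=1)

priors = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 25.]])
gts = torch.tensor([[1., 2., 9., 8.], [4., 6., 14., 22.]])
assert torch.allclose(decode_tblr(priors, encode_tblr(priors, gts)), gts, atol=1e-5)
```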
  {
    "path": "mmdet/core/bbox/coder/yolo_bbox_coder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxCoder\n\n\n@BBOX_CODERS.register_module()\nclass YOLOBBoxCoder(BaseBBoxCoder):\n    \"\"\"YOLO BBox coder.\n\n    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divide\n    image into grids, and encode bbox (x1, y1, x2, y2) into (cx, cy, dw, dh).\n    cx, cy in [0., 1.], denotes relative center position w.r.t the center of\n    bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.\n\n    Args:\n        eps (float): Min value of cx, cy when encoding.\n    \"\"\"\n\n    def __init__(self, eps=1e-6):\n        super(BaseBBoxCoder, self).__init__()\n        self.eps = eps\n\n    @mmcv.jit(coderize=True)\n    def encode(self, bboxes, gt_bboxes, stride):\n        \"\"\"Get box regression transformation deltas that can be used to\n        transform the ``bboxes`` into the ``gt_bboxes``.\n\n        Args:\n            bboxes (torch.Tensor): Source boxes, e.g., anchors.\n            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,\n                ground-truth boxes.\n            stride (torch.Tensor | int): Stride of bboxes.\n\n        Returns:\n            torch.Tensor: Box transformation deltas\n        \"\"\"\n\n        assert bboxes.size(0) == gt_bboxes.size(0)\n        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4\n        x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5\n        y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5\n        w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]\n        h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]\n        x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5\n        y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5\n        w = bboxes[..., 2] - bboxes[..., 0]\n        h = bboxes[..., 3] - bboxes[..., 1]\n        w_target = torch.log((w_gt / w).clamp(min=self.eps))\n        h_target = torch.log((h_gt / h).clamp(min=self.eps))\n        x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(\n            self.eps, 1 - self.eps)\n        y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(\n            self.eps, 1 - self.eps)\n        encoded_bboxes = torch.stack(\n            [x_center_target, y_center_target, w_target, h_target], dim=-1)\n        return encoded_bboxes\n\n    @mmcv.jit(coderize=True)\n    def decode(self, bboxes, pred_bboxes, stride):\n        \"\"\"Apply transformation `pred_bboxes` to `boxes`.\n\n        Args:\n            boxes (torch.Tensor): Basic boxes, e.g. anchors.\n            pred_bboxes (torch.Tensor): Encoded boxes with shape\n            stride (torch.Tensor | int): Strides of bboxes.\n\n        Returns:\n            torch.Tensor: Decoded boxes.\n        \"\"\"\n        assert pred_bboxes.size(-1) == bboxes.size(-1) == 4\n        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (\n            pred_bboxes[..., :2] - 0.5) * stride\n        whs = (bboxes[..., 2:] -\n               bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()\n        decoded_bboxes = torch.stack(\n            (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -\n             whs[..., 1], xy_centers[..., 0] + whs[..., 0],\n             xy_centers[..., 1] + whs[..., 1]),\n            dim=-1)\n        return decoded_bboxes\n"
  },
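As a quick sanity check on the YOLO coder's math, here is a self-contained sketch (plain PyTorch, no mmcv/mmdet imports; the helper names and the sample anchor/gt values are made up) showing that decode(encode(gt)) recovers the ground truth as long as the center offsets stay inside the (eps, 1 - eps) clamp.

```python
# Standalone sketch of the YOLO bbox encode/decode round trip.
import torch

def yolo_encode(bboxes, gt, stride, eps=1e-6):
    xc_gt = (gt[..., 0] + gt[..., 2]) * 0.5
    yc_gt = (gt[..., 1] + gt[..., 3]) * 0.5
    w_gt, h_gt = gt[..., 2] - gt[..., 0], gt[..., 3] - gt[..., 1]
    xc = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
    yc = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
    w, h = bboxes[..., 2] - bboxes[..., 0], bboxes[..., 3] - bboxes[..., 1]
    return torch.stack([
        ((xc_gt - xc) / stride + 0.5).clamp(eps, 1 - eps),
        ((yc_gt - yc) / stride + 0.5).clamp(eps, 1 - eps),
        torch.log((w_gt / w).clamp(min=eps)),
        torch.log((h_gt / h).clamp(min=eps))], dim=-1)

def yolo_decode(bboxes, pred, stride):
    xy = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (pred[..., :2] - 0.5) * stride
    whs = (bboxes[..., 2:] - bboxes[..., :2]) * 0.5 * pred[..., 2:].exp()
    return torch.cat([xy - whs, xy + whs], dim=-1)

anchors = torch.tensor([[0., 0., 32., 32.]])
gt = torch.tensor([[4., 6., 28., 30.]])   # center offset < stride / 2, so no clamping
deltas = yolo_encode(anchors, gt, stride=32)
assert torch.allclose(yolo_decode(anchors, deltas, stride=32), gt, atol=1e-4)
```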
  {
    "path": "mmdet/core/bbox/demodata.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.utils.util_random import ensure_rng\n\n\ndef random_boxes(num=1, scale=1, rng=None):\n    \"\"\"Simple version of ``kwimage.Boxes.random``\n\n    Returns:\n        Tensor: shape (n, 4) in x1, y1, x2, y2 format.\n\n    References:\n        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390\n\n    Example:\n        >>> num = 3\n        >>> scale = 512\n        >>> rng = 0\n        >>> boxes = random_boxes(num, scale, rng)\n        >>> print(boxes)\n        tensor([[280.9925, 278.9802, 308.6148, 366.1769],\n                [216.9113, 330.6978, 224.0446, 456.5878],\n                [405.3632, 196.3221, 493.3953, 270.7942]])\n    \"\"\"\n    rng = ensure_rng(rng)\n\n    tlbr = rng.rand(num, 4).astype(np.float32)\n\n    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])\n    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])\n    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])\n    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])\n\n    tlbr[:, 0] = tl_x * scale\n    tlbr[:, 1] = tl_y * scale\n    tlbr[:, 2] = br_x * scale\n    tlbr[:, 3] = br_y * scale\n\n    boxes = torch.from_numpy(tlbr)\n    return boxes\n"
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import build_iou_calculator\nfrom .iou2d_calculator import BboxOverlaps2D, bbox_overlaps\n\n__all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']\n"
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nIOU_CALCULATORS = Registry('IoU calculator')\n\n\ndef build_iou_calculator(cfg, default_args=None):\n    \"\"\"Builder of IoU calculator.\"\"\"\n    return build_from_cfg(cfg, IOU_CALCULATORS, default_args)\n"
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/iou2d_calculator.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom .builder import IOU_CALCULATORS\n\n\ndef cast_tensor_type(x, scale=1., dtype=None):\n    if dtype == 'fp16':\n        # scale is for preventing overflows\n        x = (x / scale).half()\n    return x\n\n\ndef fp16_clamp(x, min=None, max=None):\n    if not x.is_cuda and x.dtype == torch.float16:\n        # clamp for cpu float16, tensor fp16 has no clamp implementation\n        return x.float().clamp(min, max).half()\n\n    return x.clamp(min, max)\n\n\n@IOU_CALCULATORS.register_module()\nclass BboxOverlaps2D:\n    \"\"\"2D Overlaps (e.g. IoUs, GIoUs) Calculator.\"\"\"\n\n    def __init__(self, scale=1., dtype=None):\n        self.scale = scale\n        self.dtype = dtype\n\n    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):\n        \"\"\"Calculate IoU between 2D bboxes.\n\n        Args:\n            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>\n                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.\n            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>\n                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be\n                empty.\n            mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n                over foreground), or \"giou\" (generalized intersection over\n                union).\n            is_aligned (bool, optional): If True, then m and n must be equal.\n                Default False.\n\n        Returns:\n            Tensor: shape (m, n) if ``is_aligned `` is False else shape (m,)\n        \"\"\"\n        assert bboxes1.size(-1) in [0, 4, 5]\n        assert bboxes2.size(-1) in [0, 4, 5]\n        if bboxes2.size(-1) == 5:\n            bboxes2 = bboxes2[..., :4]\n        if bboxes1.size(-1) == 5:\n            bboxes1 = bboxes1[..., :4]\n\n        if self.dtype == 'fp16':\n            # change tensor type to save cpu and cuda memory and keep speed\n            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)\n            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)\n            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n            if not overlaps.is_cuda and overlaps.dtype == torch.float16:\n                # resume cpu float32\n                overlaps = overlaps.float()\n            return overlaps\n\n        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)\n\n    def __repr__(self):\n        \"\"\"str: a string describing the module\"\"\"\n        repr_str = self.__class__.__name__ + f'(' \\\n            f'scale={self.scale}, dtype={self.dtype})'\n        return repr_str\n\n\ndef bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n    \"\"\"Calculate overlap between two set of bboxes.\n\n    FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n    Note:\n        Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n        there are some new generated variable when calculating IOU\n        using bbox_overlaps function:\n\n        1) is_aligned is False\n            area1: M x 1\n            area2: N x 1\n            lt: M x N x 2\n            rb: M x N x 2\n            wh: M x N x 2\n            overlap: M x N x 1\n            union: M x N x 1\n            ious: M x N x 1\n\n            Total memory:\n                S = (9 x N x M + N + M) * 4 Byte,\n\n            When using FP16, we can reduce:\n                R = (9 x N x M + N + M) * 4 / 2 Byte\n              
R larger than (N + M) * 4 * 2 is always true when N and M >= 1.\n                Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n                           N + 1 < 3 * N, when N or M is 1.\n\n            Given M = 40 (ground truth), N = 400000 (three anchor boxes\n            per grid, FPN, R-CNNs),\n                R = 275 MB (one time)\n\n            A special case (dense detection), M = 512 (ground truth),\n                R = 3516 MB = 3.43 GB\n\n            When the batch size is B, reduce:\n                B x R\n\n            Therefore, CUDA memory runs out frequently.\n\n            Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n            |   dtype   |   M   |   N   |   Memory used   |   Real saving   |   Ideal saving   |\n            |:----:|:----:|:----:|:----:|:----:|:----:|\n            |   FP32   |   512 | 400000 | 8020 MiB |   --   |   --   |\n            |   FP16   |   512 | 400000 |   4504 MiB | 3516 MiB | 3516 MiB |\n            |   FP32   |   40 | 400000 |   1540 MiB |   --   |   --   |\n            |   FP16   |   40 | 400000 |   1264 MiB |   276 MiB   | 275 MiB |\n\n        2) is_aligned is True\n            area1: N x 1\n            area2: N x 1\n            lt: N x 2\n            rb: N x 2\n            wh: N x 2\n            overlap: N x 1\n            union: N x 1\n            ious: N x 1\n\n            Total memory:\n                S = 11 x N * 4 Byte\n\n            When using FP16, we can reduce:\n                R = 11 x N * 4 / 2 Byte\n\n        So does 'giou' (larger than 'iou').\n\n        Time-wise, FP16 is generally faster than FP32.\n\n        When gpu_assign_thr is not -1, it takes more time on cpu\n        but does not reduce memory.\n        Therefore, we can reduce half the memory and keep the speed.\n\n    If ``is_aligned`` is ``False``, then calculate the overlaps between each\n    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n    pair of bboxes1 and bboxes2.\n\n    Args:\n        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n            B indicates the batch dim, in shape (B1, B2, ..., Bn).\n            If ``is_aligned`` is ``True``, then m and n must be equal.\n        mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n            foreground) or \"giou\" (generalized intersection over union).\n            Default \"iou\".\n        is_aligned (bool, optional): If True, then m and n must be equal.\n            Default False.\n        eps (float, optional): A value added to the denominator for numerical\n            stability. 
Default 1e-6.\n\n    Returns:\n        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n    Example:\n        >>> bboxes1 = torch.FloatTensor([\n        >>>     [0, 0, 10, 10],\n        >>>     [10, 10, 20, 20],\n        >>>     [32, 32, 38, 42],\n        >>> ])\n        >>> bboxes2 = torch.FloatTensor([\n        >>>     [0, 0, 10, 20],\n        >>>     [0, 10, 10, 19],\n        >>>     [10, 10, 20, 20],\n        >>> ])\n        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n        >>> assert overlaps.shape == (3, 3)\n        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n        >>> assert overlaps.shape == (3, )\n\n    Example:\n        >>> empty = torch.empty(0, 4)\n        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n    \"\"\"\n\n    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n    # Either the boxes are empty or the length of boxes' last dimension is 4\n    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n    # Batch dim must be the same\n    # Batch dim: (B1, B2, ... Bn)\n    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n    batch_shape = bboxes1.shape[:-2]\n\n    rows = bboxes1.size(-2)\n    cols = bboxes2.size(-2)\n    if is_aligned:\n        assert rows == cols\n\n    if rows * cols == 0:\n        if is_aligned:\n            return bboxes1.new(batch_shape + (rows, ))\n        else:\n            return bboxes1.new(batch_shape + (rows, cols))\n\n    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n        bboxes1[..., 3] - bboxes1[..., 1])\n    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n        bboxes2[..., 3] - bboxes2[..., 1])\n\n    if is_aligned:\n        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]\n        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]\n\n        wh = fp16_clamp(rb - lt, min=0)\n        overlap = wh[..., 0] * wh[..., 1]\n\n        if mode in ['iou', 'giou']:\n            union = area1 + area2 - overlap\n        else:\n            union = area1\n        if mode == 'giou':\n            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n    else:\n        lt = torch.max(bboxes1[..., :, None, :2],\n                       bboxes2[..., None, :, :2])  # [B, rows, cols, 2]\n        rb = torch.min(bboxes1[..., :, None, 2:],\n                       bboxes2[..., None, :, 2:])  # [B, rows, cols, 2]\n\n        wh = fp16_clamp(rb - lt, min=0)\n        overlap = wh[..., 0] * wh[..., 1]\n\n        if mode in ['iou', 'giou']:\n            union = area1[..., None] + area2[..., None, :] - overlap\n        else:\n            union = area1[..., None]\n        if mode == 'giou':\n            enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n                                    bboxes2[..., None, :, :2])\n            enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n                                    bboxes2[..., None, :, 2:])\n\n    eps = union.new_tensor([eps])\n    union = torch.max(union, eps)\n    ious = overlap / union\n    if mode in ['iou', 'iof']:\n        return ious\n    # calculate gious\n    enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n    enclose_area = enclose_wh[..., 0] 
* enclose_wh[..., 1]\n    enclose_area = torch.max(enclose_area, eps)\n    gious = ious - (enclose_area - union) / enclose_area\n    return gious\n"
  },
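The docstring above already shows how to call `bbox_overlaps`; the following standalone snippet (plain PyTorch, hand-picked boxes, not part of the repository) just walks through the same IoU/GIoU arithmetic once so the expected values can be checked by hand (intersection 25, union 175, IoU ≈ 0.143, GIoU ≈ -0.079).

```python
# Hand-checked sketch of the IoU / GIoU arithmetic used by bbox_overlaps.
import torch

b1 = torch.tensor([[0., 0., 10., 10.]])
b2 = torch.tensor([[5., 5., 15., 15.]])

lt = torch.max(b1[:, :2], b2[:, :2])                     # (5, 5)
rb = torch.min(b1[:, 2:], b2[:, 2:])                     # (10, 10)
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]                            # 25
area1 = (b1[:, 2] - b1[:, 0]) * (b1[:, 3] - b1[:, 1])    # 100
area2 = (b2[:, 2] - b2[:, 0]) * (b2[:, 3] - b2[:, 1])    # 100
union = area1 + area2 - overlap                          # 175
iou = overlap / union                                    # 25 / 175 ~= 0.1429

enclose_lt = torch.min(b1[:, :2], b2[:, :2])             # (0, 0)
enclose_rb = torch.max(b1[:, 2:], b2[:, 2:])             # (15, 15)
enclose_wh = (enclose_rb - enclose_lt).clamp(min=0)
enclose_area = enclose_wh[:, 0] * enclose_wh[:, 1]       # 225
giou = iou - (enclose_area - union) / enclose_area       # ~= -0.0794

print(iou.item(), giou.item())
```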
  {
    "path": "mmdet/core/bbox/match_costs/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import build_match_cost\nfrom .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost,\n                         DiceCost, FocalLossCost, IoUCost)\n\n__all__ = [\n    'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost',\n    'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost'\n]\n"
  },
  {
    "path": "mmdet/core/bbox/match_costs/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nMATCH_COST = Registry('Match Cost')\n\n\ndef build_match_cost(cfg, default_args=None):\n    \"\"\"Builder of IoU calculator.\"\"\"\n    return build_from_cfg(cfg, MATCH_COST, default_args)\n"
  },
  {
    "path": "mmdet/core/bbox/match_costs/match_cost.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core.bbox.iou_calculators import bbox_overlaps\nfrom mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh\nfrom .builder import MATCH_COST\n\n\n@MATCH_COST.register_module()\nclass BBoxL1Cost:\n    \"\"\"BBoxL1Cost.\n\n     Args:\n         weight (int | float, optional): loss_weight\n         box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN\n\n     Examples:\n         >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost\n         >>> import torch\n         >>> self = BBoxL1Cost()\n         >>> bbox_pred = torch.rand(1, 4)\n         >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])\n         >>> factor = torch.tensor([10, 8, 10, 8])\n         >>> self(bbox_pred, gt_bboxes, factor)\n         tensor([[1.6172, 1.6422]])\n    \"\"\"\n\n    def __init__(self, weight=1., box_format='xyxy'):\n        self.weight = weight\n        assert box_format in ['xyxy', 'xywh']\n        self.box_format = box_format\n\n    def __call__(self, bbox_pred, gt_bboxes):\n        \"\"\"\n        Args:\n            bbox_pred (Tensor): Predicted boxes with normalized coordinates\n                (cx, cy, w, h), which are all in range [0, 1]. Shape\n                (num_query, 4).\n            gt_bboxes (Tensor): Ground truth boxes with normalized\n                coordinates (x1, y1, x2, y2). Shape (num_gt, 4).\n\n        Returns:\n            torch.Tensor: bbox_cost value with weight\n        \"\"\"\n        if self.box_format == 'xywh':\n            gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes)\n        elif self.box_format == 'xyxy':\n            bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred)\n        bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)\n        return bbox_cost * self.weight\n\n\n@MATCH_COST.register_module()\nclass FocalLossCost:\n    \"\"\"FocalLossCost.\n\n     Args:\n         weight (int | float, optional): loss_weight\n         alpha (int | float, optional): focal_loss alpha\n         gamma (int | float, optional): focal_loss gamma\n         eps (float, optional): default 1e-12\n         binary_input (bool, optional): Whether the input is binary,\n            default False.\n\n     Examples:\n         >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost\n         >>> import torch\n         >>> self = FocalLossCost()\n         >>> cls_pred = torch.rand(4, 3)\n         >>> gt_labels = torch.tensor([0, 1, 2])\n         >>> factor = torch.tensor([10, 8, 10, 8])\n         >>> self(cls_pred, gt_labels)\n         tensor([[-0.3236, -0.3364, -0.2699],\n                [-0.3439, -0.3209, -0.4807],\n                [-0.4099, -0.3795, -0.2929],\n                [-0.1950, -0.1207, -0.2626]])\n    \"\"\"\n\n    def __init__(self,\n                 weight=1.,\n                 alpha=0.25,\n                 gamma=2,\n                 eps=1e-12,\n                 binary_input=False):\n        self.weight = weight\n        self.alpha = alpha\n        self.gamma = gamma\n        self.eps = eps\n        self.binary_input = binary_input\n\n    def _focal_loss_cost(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits, shape\n                (num_query, num_class).\n            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n        Returns:\n            torch.Tensor: cls_cost value with weight\n        \"\"\"\n        cls_pred = 
cls_pred.sigmoid()\n        neg_cost = -(1 - cls_pred + self.eps).log() * (\n            1 - self.alpha) * cls_pred.pow(self.gamma)\n        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (\n            1 - cls_pred).pow(self.gamma)\n\n        cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels]\n        return cls_cost * self.weight\n\n    def _mask_focal_loss_cost(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits\n                in shape (num_query, d1, ..., dn), dtype=torch.float32.\n            gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn),\n                dtype=torch.long. Labels should be binary.\n\n        Returns:\n            Tensor: Focal cost matrix with weight in shape\\\n                (num_query, num_gt).\n        \"\"\"\n        cls_pred = cls_pred.flatten(1)\n        gt_labels = gt_labels.flatten(1).float()\n        n = cls_pred.shape[1]\n        cls_pred = cls_pred.sigmoid()\n        neg_cost = -(1 - cls_pred + self.eps).log() * (\n            1 - self.alpha) * cls_pred.pow(self.gamma)\n        pos_cost = -(cls_pred + self.eps).log() * self.alpha * (\n            1 - cls_pred).pow(self.gamma)\n\n        cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \\\n            torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels))\n        return cls_cost / n * self.weight\n\n    def __call__(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits.\n            gt_labels (Tensor): Labels.\n\n        Returns:\n            Tensor: Focal cost matrix with weight in shape\\\n                (num_query, num_gt).\n        \"\"\"\n        if self.binary_input:\n            return self._mask_focal_loss_cost(cls_pred, gt_labels)\n        else:\n            return self._focal_loss_cost(cls_pred, gt_labels)\n\n\n@MATCH_COST.register_module()\nclass ClassificationCost:\n    \"\"\"ClsSoftmaxCost.\n\n     Args:\n         weight (int | float, optional): loss_weight\n\n     Examples:\n         >>> from mmdet.core.bbox.match_costs.match_cost import \\\n         ... 
ClassificationCost\n         >>> import torch\n         >>> self = ClassificationCost()\n         >>> cls_pred = torch.rand(4, 3)\n         >>> gt_labels = torch.tensor([0, 1, 2])\n         >>> factor = torch.tensor([10, 8, 10, 8])\n         >>> self(cls_pred, gt_labels)\n         tensor([[-0.3430, -0.3525, -0.3045],\n                [-0.3077, -0.2931, -0.3992],\n                [-0.3664, -0.3455, -0.2881],\n                [-0.3343, -0.2701, -0.3956]])\n    \"\"\"\n\n    def __init__(self, weight=1.):\n        self.weight = weight\n\n    def __call__(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits, shape\n                (num_query, num_class).\n            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).\n\n        Returns:\n            torch.Tensor: cls_cost value with weight\n        \"\"\"\n        # Following the official DETR repo, contrary to the loss that\n        # NLL is used, we approximate it in 1 - cls_score[gt_label].\n        # The 1 is a constant that doesn't change the matching,\n        # so it can be omitted.\n        cls_score = cls_pred.softmax(-1)\n        cls_cost = -cls_score[:, gt_labels]\n        return cls_cost * self.weight\n\n\n@MATCH_COST.register_module()\nclass IoUCost:\n    \"\"\"IoUCost.\n\n     Args:\n         iou_mode (str, optional): iou mode such as 'iou' | 'giou'\n         weight (int | float, optional): loss weight\n\n     Examples:\n         >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost\n         >>> import torch\n         >>> self = IoUCost()\n         >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]])\n         >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]])\n         >>> self(bboxes, gt_bboxes)\n         tensor([[-0.1250,  0.1667],\n                [ 0.1667, -0.5000]])\n    \"\"\"\n\n    def __init__(self, iou_mode='giou', weight=1.):\n        self.weight = weight\n        self.iou_mode = iou_mode\n\n    def __call__(self, bboxes, gt_bboxes):\n        \"\"\"\n        Args:\n            bboxes (Tensor): Predicted boxes with unnormalized coordinates\n                (x1, y1, x2, y2). Shape (num_query, 4).\n            gt_bboxes (Tensor): Ground truth boxes with unnormalized\n                coordinates (x1, y1, x2, y2). Shape (num_gt, 4).\n\n        Returns:\n            torch.Tensor: iou_cost value with weight\n        \"\"\"\n        # overlaps: [num_bboxes, num_gt]\n        overlaps = bbox_overlaps(\n            bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False)\n        # The 1 is a constant that doesn't change the matching, so omitted.\n        iou_cost = -overlaps\n        return iou_cost * self.weight\n\n\n@MATCH_COST.register_module()\nclass DiceCost:\n    \"\"\"Cost of mask assignments based on dice losses.\n\n    Args:\n        weight (int | float, optional): loss_weight. Defaults to 1.\n        pred_act (bool, optional): Whether to apply sigmoid to mask_pred.\n            Defaults to False.\n        eps (float, optional): default 1e-12.\n        naive_dice (bool, optional): If True, use the naive dice loss\n            in which the power of the number in the denominator is\n            the first power. 
If False, use the second power that\n            is adopted by K-Net and SOLO.\n            Defaults to True.\n    \"\"\"\n\n    def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True):\n        self.weight = weight\n        self.pred_act = pred_act\n        self.eps = eps\n        self.naive_dice = naive_dice\n\n    def binary_mask_dice_loss(self, mask_preds, gt_masks):\n        \"\"\"\n        Args:\n            mask_preds (Tensor): Mask prediction in shape (num_query, *).\n            gt_masks (Tensor): Ground truth in shape (num_gt, *) which\n                stores 0 or 1; 0 for negative class and 1 for\n                positive class.\n\n        Returns:\n            Tensor: Dice cost matrix in shape (num_query, num_gt).\n        \"\"\"\n        mask_preds = mask_preds.flatten(1)\n        gt_masks = gt_masks.flatten(1).float()\n        numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks)\n        if self.naive_dice:\n            denominator = mask_preds.sum(-1)[:, None] + \\\n                gt_masks.sum(-1)[None, :]\n        else:\n            denominator = mask_preds.pow(2).sum(1)[:, None] + \\\n                gt_masks.pow(2).sum(1)[None, :]\n        loss = 1 - (numerator + self.eps) / (denominator + self.eps)\n        return loss\n\n    def __call__(self, mask_preds, gt_masks):\n        \"\"\"\n        Args:\n            mask_preds (Tensor): Mask prediction logits in shape (num_query, *)\n            gt_masks (Tensor): Ground truth in shape (num_gt, *)\n\n        Returns:\n            Tensor: Dice cost matrix with weight in shape (num_query, num_gt).\n        \"\"\"\n        if self.pred_act:\n            mask_preds = mask_preds.sigmoid()\n        dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks)\n        return dice_cost * self.weight\n\n\n@MATCH_COST.register_module()\nclass CrossEntropyLossCost:\n    \"\"\"CrossEntropyLossCost.\n\n    Args:\n        weight (int | float, optional): loss weight. Defaults to 1.\n        use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n                or softmax. 
Defaults to True.\n    Examples:\n         >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost\n         >>> import torch\n         >>> bce = CrossEntropyLossCost(use_sigmoid=True)\n         >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]])\n         >>> gt_labels = torch.tensor([[1, 1], [1, 0]])\n         >>> print(bce(cls_pred, gt_labels))\n    \"\"\"\n\n    def __init__(self, weight=1., use_sigmoid=True):\n        assert use_sigmoid, 'use_sigmoid = False is not supported yet.'\n        self.weight = weight\n        self.use_sigmoid = use_sigmoid\n\n    def _binary_cross_entropy(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): The prediction with shape (num_query, 1, *) or\n                (num_query, *).\n            gt_labels (Tensor): The learning label of prediction with\n                shape (num_gt, *).\n\n        Returns:\n            Tensor: Cross entropy cost matrix in shape (num_query, num_gt).\n        \"\"\"\n        cls_pred = cls_pred.flatten(1).float()\n        gt_labels = gt_labels.flatten(1).float()\n        n = cls_pred.shape[1]\n        pos = F.binary_cross_entropy_with_logits(\n            cls_pred, torch.ones_like(cls_pred), reduction='none')\n        neg = F.binary_cross_entropy_with_logits(\n            cls_pred, torch.zeros_like(cls_pred), reduction='none')\n        cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \\\n            torch.einsum('nc,mc->nm', neg, 1 - gt_labels)\n        cls_cost = cls_cost / n\n\n        return cls_cost\n\n    def __call__(self, cls_pred, gt_labels):\n        \"\"\"\n        Args:\n            cls_pred (Tensor): Predicted classification logits.\n            gt_labels (Tensor): Labels.\n\n        Returns:\n            Tensor: Cross entropy cost matrix with weight in\n                shape (num_query, num_gt).\n        \"\"\"\n        if self.use_sigmoid:\n            cls_cost = self._binary_cross_entropy(cls_pred, gt_labels)\n        else:\n            raise NotImplementedError\n\n        return cls_cost * self.weight\n"
  },
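These cost classes each return a (num_query, num_gt) matrix that is combined and handed to a bipartite matcher. A minimal sketch of that usage, assuming SciPy's `linear_sum_assignment` as the matcher and unit weights (the tensors, weights, and the inline recomputation of the costs below are illustrative, not the registry-based mmdet API):

```python
# Sketch: combine a classification cost and an L1 bbox cost, then match.
import torch
from scipy.optimize import linear_sum_assignment

cls_pred = torch.rand(4, 3)                        # 4 queries, 3 classes (logits)
bbox_pred = torch.rand(4, 4)                       # normalized (cx, cy, w, h)
gt_labels = torch.tensor([0, 2])
gt_bboxes = torch.tensor([[0.2, 0.3, 0.4, 0.5],    # normalized (cx, cy, w, h)
                          [0.6, 0.6, 0.2, 0.2]])

cls_cost = -cls_pred.softmax(-1)[:, gt_labels]     # as in ClassificationCost
reg_cost = torch.cdist(bbox_pred, gt_bboxes, p=1)  # as in BBoxL1Cost ('xywh')
cost = (cls_cost + reg_cost).numpy()               # (num_query, num_gt)

rows, cols = linear_sum_assignment(cost)           # query rows[i] <-> gt cols[i]
print(list(zip(rows.tolist(), cols.tolist())))
```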
  {
    "path": "mmdet/core/bbox/samplers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_sampler import BaseSampler\nfrom .combined_sampler import CombinedSampler\nfrom .instance_balanced_pos_sampler import InstanceBalancedPosSampler\nfrom .iou_balanced_neg_sampler import IoUBalancedNegSampler\nfrom .mask_pseudo_sampler import MaskPseudoSampler\nfrom .mask_sampling_result import MaskSamplingResult\nfrom .ohem_sampler import OHEMSampler\nfrom .pseudo_sampler import PseudoSampler\nfrom .random_sampler import RandomSampler\nfrom .sampling_result import SamplingResult\nfrom .score_hlr_sampler import ScoreHLRSampler\n\n__all__ = [\n    'BaseSampler', 'PseudoSampler', 'RandomSampler',\n    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',\n    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler',\n    'MaskSamplingResult'\n]\n"
  },
  {
    "path": "mmdet/core/bbox/samplers/base_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\n\nfrom .sampling_result import SamplingResult\n\n\nclass BaseSampler(metaclass=ABCMeta):\n    \"\"\"Base class of samplers.\"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 neg_pos_ub=-1,\n                 add_gt_as_proposals=True,\n                 **kwargs):\n        self.num = num\n        self.pos_fraction = pos_fraction\n        self.neg_pos_ub = neg_pos_ub\n        self.add_gt_as_proposals = add_gt_as_proposals\n        self.pos_sampler = self\n        self.neg_sampler = self\n\n    @abstractmethod\n    def _sample_pos(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        pass\n\n    @abstractmethod\n    def _sample_neg(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        pass\n\n    def sample(self,\n               assign_result,\n               bboxes,\n               gt_bboxes,\n               gt_labels=None,\n               **kwargs):\n        \"\"\"Sample positive and negative bboxes.\n\n        This is a simple implementation of bbox sampling given candidates,\n        assigning results and ground truth bboxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            bboxes (Tensor): Boxes to be sampled from.\n            gt_bboxes (Tensor): Ground truth bboxes.\n            gt_labels (Tensor, optional): Class labels of ground truth bboxes.\n\n        Returns:\n            :obj:`SamplingResult`: Sampling result.\n\n        Example:\n            >>> from mmdet.core.bbox import RandomSampler\n            >>> from mmdet.core.bbox import AssignResult\n            >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes\n            >>> rng = ensure_rng(None)\n            >>> assign_result = AssignResult.random(rng=rng)\n            >>> bboxes = random_boxes(assign_result.num_preds, rng=rng)\n            >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)\n            >>> gt_labels = None\n            >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1,\n            >>>                      add_gt_as_proposals=False)\n            >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels)\n        \"\"\"\n        if len(bboxes.shape) < 2:\n            bboxes = bboxes[None, :]\n\n        bboxes = bboxes[:, :4]\n\n        gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)\n        if self.add_gt_as_proposals and len(gt_bboxes) > 0:\n            if gt_labels is None:\n                raise ValueError(\n                    'gt_labels must be given when add_gt_as_proposals is True')\n            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)\n            assign_result.add_gt_(gt_labels)\n            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n            gt_flags = torch.cat([gt_ones, gt_flags])\n\n        num_expected_pos = int(self.num * self.pos_fraction)\n        pos_inds = self.pos_sampler._sample_pos(\n            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)\n        # We found that sampled indices have duplicated items occasionally.\n        # (may be a bug of PyTorch)\n        pos_inds = pos_inds.unique()\n        num_sampled_pos = pos_inds.numel()\n        num_expected_neg = self.num - num_sampled_pos\n        if self.neg_pos_ub >= 0:\n            _pos = max(1, 
num_sampled_pos)\n            neg_upper_bound = int(self.neg_pos_ub * _pos)\n            if num_expected_neg > neg_upper_bound:\n                num_expected_neg = neg_upper_bound\n        neg_inds = self.neg_sampler._sample_neg(\n            assign_result, num_expected_neg, bboxes=bboxes, **kwargs)\n        neg_inds = neg_inds.unique()\n\n        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n                                         assign_result, gt_flags)\n        return sampling_result\n"
  },
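The only non-obvious part of `BaseSampler.sample` above is the positive/negative quota bookkeeping. A tiny standalone sketch with made-up numbers (512 samples, 25% positive fraction, `neg_pos_ub=3`):

```python
# Sketch of the pos/neg quota logic in BaseSampler.sample (illustrative values).
num, pos_fraction, neg_pos_ub = 512, 0.25, 3

num_expected_pos = int(num * pos_fraction)                        # 128
num_sampled_pos = 40                                              # suppose only 40 positives exist
num_expected_neg = num - num_sampled_pos                          # 472
if neg_pos_ub >= 0:
    neg_upper_bound = int(neg_pos_ub * max(1, num_sampled_pos))   # 120
    num_expected_neg = min(num_expected_neg, neg_upper_bound)
print(num_expected_pos, num_expected_neg)                         # 128 120
```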
  {
    "path": "mmdet/core/bbox/samplers/combined_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_SAMPLERS, build_sampler\nfrom .base_sampler import BaseSampler\n\n\n@BBOX_SAMPLERS.register_module()\nclass CombinedSampler(BaseSampler):\n    \"\"\"A sampler that combines positive sampler and negative sampler.\"\"\"\n\n    def __init__(self, pos_sampler, neg_sampler, **kwargs):\n        super(CombinedSampler, self).__init__(**kwargs)\n        self.pos_sampler = build_sampler(pos_sampler, **kwargs)\n        self.neg_sampler = build_sampler(neg_sampler, **kwargs)\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n"
  },
  {
    "path": "mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .random_sampler import RandomSampler\n\n\n@BBOX_SAMPLERS.register_module()\nclass InstanceBalancedPosSampler(RandomSampler):\n    \"\"\"Instance balanced sampler that samples equal number of positive samples\n    for each instance.\"\"\"\n\n    def _sample_pos(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample positive boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): The assigned results of boxes.\n            num_expected (int): The number of expected positive samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            unique_gt_inds = assign_result.gt_inds[pos_inds].unique()\n            num_gts = len(unique_gt_inds)\n            num_per_gt = int(round(num_expected / float(num_gts)) + 1)\n            sampled_inds = []\n            for i in unique_gt_inds:\n                inds = torch.nonzero(\n                    assign_result.gt_inds == i.item(), as_tuple=False)\n                if inds.numel() != 0:\n                    inds = inds.squeeze(1)\n                else:\n                    continue\n                if len(inds) > num_per_gt:\n                    inds = self.random_choice(inds, num_per_gt)\n                sampled_inds.append(inds)\n            sampled_inds = torch.cat(sampled_inds)\n            if len(sampled_inds) < num_expected:\n                num_extra = num_expected - len(sampled_inds)\n                extra_inds = np.array(\n                    list(set(pos_inds.cpu()) - set(sampled_inds.cpu())))\n                if len(extra_inds) > num_extra:\n                    extra_inds = self.random_choice(extra_inds, num_extra)\n                extra_inds = torch.from_numpy(extra_inds).to(\n                    assign_result.gt_inds.device).long()\n                sampled_inds = torch.cat([sampled_inds, extra_inds])\n            elif len(sampled_inds) > num_expected:\n                sampled_inds = self.random_choice(sampled_inds, num_expected)\n            return sampled_inds\n"
  },
  {
    "path": "mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .random_sampler import RandomSampler\n\n\n@BBOX_SAMPLERS.register_module()\nclass IoUBalancedNegSampler(RandomSampler):\n    \"\"\"IoU Balanced Sampling.\n\n    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n    Sampling proposals according to their IoU. `floor_fraction` of needed RoIs\n    are sampled from proposals whose IoU are lower than `floor_thr` randomly.\n    The others are sampled from proposals whose IoU are higher than\n    `floor_thr`. These proposals are sampled from some bins evenly, which are\n    split by `num_bins` via IoU evenly.\n\n    Args:\n        num (int): number of proposals.\n        pos_fraction (float): fraction of positive proposals.\n        floor_thr (float): threshold (minimum) IoU for IoU balanced sampling,\n            set to -1 if all using IoU balanced sampling.\n        floor_fraction (float): sampling fraction of proposals under floor_thr.\n        num_bins (int): number of bins in IoU balanced sampling.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 floor_thr=-1,\n                 floor_fraction=0,\n                 num_bins=3,\n                 **kwargs):\n        super(IoUBalancedNegSampler, self).__init__(num, pos_fraction,\n                                                    **kwargs)\n        assert floor_thr >= 0 or floor_thr == -1\n        assert 0 <= floor_fraction <= 1\n        assert num_bins >= 1\n\n        self.floor_thr = floor_thr\n        self.floor_fraction = floor_fraction\n        self.num_bins = num_bins\n\n    def sample_via_interval(self, max_overlaps, full_set, num_expected):\n        \"\"\"Sample according to the iou interval.\n\n        Args:\n            max_overlaps (torch.Tensor): IoU between bounding boxes and ground\n                truth boxes.\n            full_set (set(int)): A full set of indices of boxes。\n            num_expected (int): Number of expected samples。\n\n        Returns:\n            np.ndarray: Indices  of samples\n        \"\"\"\n        max_iou = max_overlaps.max()\n        iou_interval = (max_iou - self.floor_thr) / self.num_bins\n        per_num_expected = int(num_expected / self.num_bins)\n\n        sampled_inds = []\n        for i in range(self.num_bins):\n            start_iou = self.floor_thr + i * iou_interval\n            end_iou = self.floor_thr + (i + 1) * iou_interval\n            tmp_set = set(\n                np.where(\n                    np.logical_and(max_overlaps >= start_iou,\n                                   max_overlaps < end_iou))[0])\n            tmp_inds = list(tmp_set & full_set)\n            if len(tmp_inds) > per_num_expected:\n                tmp_sampled_set = self.random_choice(tmp_inds,\n                                                     per_num_expected)\n            else:\n                tmp_sampled_set = np.array(tmp_inds, dtype=np.int)\n            sampled_inds.append(tmp_sampled_set)\n\n        sampled_inds = np.concatenate(sampled_inds)\n        if len(sampled_inds) < num_expected:\n            num_extra = num_expected - len(sampled_inds)\n            extra_inds = np.array(list(full_set - set(sampled_inds)))\n            if len(extra_inds) > num_extra:\n                extra_inds = self.random_choice(extra_inds, num_extra)\n            sampled_inds = np.concatenate([sampled_inds, extra_inds])\n\n        return sampled_inds\n\n    def 
_sample_neg(self, assign_result, num_expected, **kwargs):\n        \"\"\"Sample negative boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): The assigned results of boxes.\n            num_expected (int): The number of expected negative samples\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            max_overlaps = assign_result.max_overlaps.cpu().numpy()\n            # balance sampling for negative samples\n            neg_set = set(neg_inds.cpu().numpy())\n\n            if self.floor_thr > 0:\n                floor_set = set(\n                    np.where(\n                        np.logical_and(max_overlaps >= 0,\n                                       max_overlaps < self.floor_thr))[0])\n                iou_sampling_set = set(\n                    np.where(max_overlaps >= self.floor_thr)[0])\n            elif self.floor_thr == 0:\n                floor_set = set(np.where(max_overlaps == 0)[0])\n                iou_sampling_set = set(\n                    np.where(max_overlaps > self.floor_thr)[0])\n            else:\n                floor_set = set()\n                iou_sampling_set = set(\n                    np.where(max_overlaps > self.floor_thr)[0])\n                # for sampling interval calculation\n                self.floor_thr = 0\n\n            floor_neg_inds = list(floor_set & neg_set)\n            iou_sampling_neg_inds = list(iou_sampling_set & neg_set)\n            num_expected_iou_sampling = int(num_expected *\n                                            (1 - self.floor_fraction))\n            if len(iou_sampling_neg_inds) > num_expected_iou_sampling:\n                if self.num_bins >= 2:\n                    iou_sampled_inds = self.sample_via_interval(\n                        max_overlaps, set(iou_sampling_neg_inds),\n                        num_expected_iou_sampling)\n                else:\n                    iou_sampled_inds = self.random_choice(\n                        iou_sampling_neg_inds, num_expected_iou_sampling)\n            else:\n                iou_sampled_inds = np.array(\n                    iou_sampling_neg_inds, dtype=np.int)\n            num_expected_floor = num_expected - len(iou_sampled_inds)\n            if len(floor_neg_inds) > num_expected_floor:\n                sampled_floor_inds = self.random_choice(\n                    floor_neg_inds, num_expected_floor)\n            else:\n                sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int)\n            sampled_inds = np.concatenate(\n                (sampled_floor_inds, iou_sampled_inds))\n            if len(sampled_inds) < num_expected:\n                num_extra = num_expected - len(sampled_inds)\n                extra_inds = np.array(list(neg_set - set(sampled_inds)))\n                if len(extra_inds) > num_extra:\n                    extra_inds = self.random_choice(extra_inds, num_extra)\n                sampled_inds = np.concatenate((sampled_inds, extra_inds))\n            sampled_inds = torch.from_numpy(sampled_inds).long().to(\n                assign_result.gt_inds.device)\n            return sampled_inds\n"
  },
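A standalone NumPy sketch of the interval-based sampling idea in `sample_via_interval` above, with fake IoUs and illustrative parameters (this is not the mmdet API, just the binning logic): negatives above `floor_thr` are split into equal-width IoU bins and drawn evenly from each bin.

```python
# Sketch: split negative proposals into IoU bins and sample evenly per bin.
import numpy as np

rng = np.random.RandomState(0)
max_overlaps = rng.rand(1000) * 0.5        # fake IoUs of negative proposals
floor_thr, num_bins, num_expected = 0.0, 3, 30

iou_interval = (max_overlaps.max() - floor_thr) / num_bins
per_bin = num_expected // num_bins

sampled = []
for i in range(num_bins):
    lo = floor_thr + i * iou_interval
    hi = floor_thr + (i + 1) * iou_interval
    inds = np.where((max_overlaps >= lo) & (max_overlaps < hi))[0]
    take = min(per_bin, len(inds))
    sampled.append(rng.choice(inds, take, replace=False))
sampled = np.concatenate(sampled)
print(len(sampled), sampled[:10])
```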
  {
    "path": "mmdet/core/bbox/samplers/mask_pseudo_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.\"\"\"\n\nimport torch\n\nfrom mmdet.core.bbox.builder import BBOX_SAMPLERS\nfrom .base_sampler import BaseSampler\nfrom .mask_sampling_result import MaskSamplingResult\n\n\n@BBOX_SAMPLERS.register_module()\nclass MaskPseudoSampler(BaseSampler):\n    \"\"\"A pseudo sampler that does not do sampling actually.\"\"\"\n\n    def __init__(self, **kwargs):\n        pass\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n\n    def sample(self, assign_result, masks, gt_masks, **kwargs):\n        \"\"\"Directly returns the positive and negative indices  of samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            masks (torch.Tensor): Bounding boxes\n            gt_masks (torch.Tensor): Ground truth boxes\n        Returns:\n            :obj:`SamplingResult`: sampler results\n        \"\"\"\n        pos_inds = torch.nonzero(\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\n        neg_inds = torch.nonzero(\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\n        gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8)\n        sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks,\n                                             gt_masks, assign_result, gt_flags)\n        return sampling_result\n"
  },
  {
    "path": "mmdet/core/bbox/samplers/mask_sampling_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.\"\"\"\n\nimport torch\n\nfrom .sampling_result import SamplingResult\n\n\nclass MaskSamplingResult(SamplingResult):\n    \"\"\"Mask sampling result.\"\"\"\n\n    def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result,\n                 gt_flags):\n        self.pos_inds = pos_inds\n        self.neg_inds = neg_inds\n        self.pos_masks = masks[pos_inds]\n        self.neg_masks = masks[neg_inds]\n        self.pos_is_gt = gt_flags[pos_inds]\n\n        self.num_gts = gt_masks.shape[0]\n        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n\n        if gt_masks.numel() == 0:\n            # hack for index error case\n            assert self.pos_assigned_gt_inds.numel() == 0\n            self.pos_gt_masks = torch.empty_like(gt_masks)\n        else:\n            self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :]\n\n        if assign_result.labels is not None:\n            self.pos_gt_labels = assign_result.labels[pos_inds]\n        else:\n            self.pos_gt_labels = None\n\n    @property\n    def masks(self):\n        \"\"\"torch.Tensor: concatenated positive and negative boxes\"\"\"\n        return torch.cat([self.pos_masks, self.neg_masks])\n\n    def __nice__(self):\n        data = self.info.copy()\n        data['pos_masks'] = data.pop('pos_masks').shape\n        data['neg_masks'] = data.pop('neg_masks').shape\n        parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n        body = '    ' + ',\\n    '.join(parts)\n        return '{\\n' + body + '\\n}'\n\n    @property\n    def info(self):\n        \"\"\"Returns a dictionary of info about the object.\"\"\"\n        return {\n            'pos_inds': self.pos_inds,\n            'neg_inds': self.neg_inds,\n            'pos_masks': self.pos_masks,\n            'neg_masks': self.neg_masks,\n            'pos_is_gt': self.pos_is_gt,\n            'num_gts': self.num_gts,\n            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n        }\n"
  },
  {
    "path": "mmdet/core/bbox/samplers/ohem_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom ..transforms import bbox2roi\nfrom .base_sampler import BaseSampler\n\n\n@BBOX_SAMPLERS.register_module()\nclass OHEMSampler(BaseSampler):\n    r\"\"\"Online Hard Example Mining Sampler described in `Training Region-based\n    Object Detectors with Online Hard Example Mining\n    <https://arxiv.org/abs/1604.03540>`_.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 context,\n                 neg_pos_ub=-1,\n                 add_gt_as_proposals=True,\n                 loss_key='loss_cls',\n                 **kwargs):\n        super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub,\n                                          add_gt_as_proposals)\n        self.context = context\n        if not hasattr(self.context, 'num_stages'):\n            self.bbox_head = self.context.bbox_head\n        else:\n            self.bbox_head = self.context.bbox_head[self.context.current_stage]\n\n        self.loss_key = loss_key\n\n    def hard_mining(self, inds, num_expected, bboxes, labels, feats):\n        with torch.no_grad():\n            rois = bbox2roi([bboxes])\n            if not hasattr(self.context, 'num_stages'):\n                bbox_results = self.context._bbox_forward(feats, rois)\n            else:\n                bbox_results = self.context._bbox_forward(\n                    self.context.current_stage, feats, rois)\n            cls_score = bbox_results['cls_score']\n            loss = self.bbox_head.loss(\n                cls_score=cls_score,\n                bbox_pred=None,\n                rois=rois,\n                labels=labels,\n                label_weights=cls_score.new_ones(cls_score.size(0)),\n                bbox_targets=None,\n                bbox_weights=None,\n                reduction_override='none')[self.loss_key]\n            _, topk_loss_inds = loss.topk(num_expected)\n        return inds[topk_loss_inds]\n\n    def _sample_pos(self,\n                    assign_result,\n                    num_expected,\n                    bboxes=None,\n                    feats=None,\n                    **kwargs):\n        \"\"\"Sample positive boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            num_expected (int): Number of expected positive samples\n            bboxes (torch.Tensor, optional): Boxes. 
Defaults to None.\n            feats (list[torch.Tensor], optional): Multi-level features.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: Indices  of positive samples\n        \"\"\"\n        # Sample some hard positive samples\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds],\n                                    assign_result.labels[pos_inds], feats)\n\n    def _sample_neg(self,\n                    assign_result,\n                    num_expected,\n                    bboxes=None,\n                    feats=None,\n                    **kwargs):\n        \"\"\"Sample negative boxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            num_expected (int): Number of expected negative samples\n            bboxes (torch.Tensor, optional): Boxes. Defaults to None.\n            feats (list[torch.Tensor], optional): Multi-level features.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: Indices  of negative samples\n        \"\"\"\n        # Sample some hard negative samples\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            neg_labels = assign_result.labels.new_empty(\n                neg_inds.size(0)).fill_(self.bbox_head.num_classes)\n            return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds],\n                                    neg_labels, feats)\n"
  },
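Stripped of the RoI forward pass, the core of `hard_mining` above is simply "keep the `num_expected` candidates with the largest per-sample loss". A minimal illustration with made-up losses:

```python
# Sketch of the OHEM selection step: top-k candidates by per-sample loss.
import torch

num_expected = 3
per_sample_loss = torch.tensor([0.2, 1.5, 0.1, 0.9, 2.3])   # e.g. 'loss_cls' per RoI
inds = torch.arange(per_sample_loss.numel())
_, topk = per_sample_loss.topk(num_expected)
print(inds[topk])                                           # tensor([4, 1, 3])
```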
  {
    "path": "mmdet/core/bbox/samplers/pseudo_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .base_sampler import BaseSampler\nfrom .sampling_result import SamplingResult\n\n\n@BBOX_SAMPLERS.register_module()\nclass PseudoSampler(BaseSampler):\n    \"\"\"A pseudo sampler that does not do sampling actually.\"\"\"\n\n    def __init__(self, **kwargs):\n        pass\n\n    def _sample_pos(self, **kwargs):\n        \"\"\"Sample positive samples.\"\"\"\n        raise NotImplementedError\n\n    def _sample_neg(self, **kwargs):\n        \"\"\"Sample negative samples.\"\"\"\n        raise NotImplementedError\n\n    def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):\n        \"\"\"Directly returns the positive and negative indices  of samples.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Assigned results\n            bboxes (torch.Tensor): Bounding boxes\n            gt_bboxes (torch.Tensor): Ground truth boxes\n\n        Returns:\n            :obj:`SamplingResult`: sampler results\n        \"\"\"\n        pos_inds = torch.nonzero(\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\n        neg_inds = torch.nonzero(\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\n        gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8)\n        sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n                                         assign_result, gt_flags)\n        return sampling_result\n"
  },
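A minimal sketch (assuming an installed mmdet, and using the `demodata` and `AssignResult.random` helpers that the sampler files below rely on) showing that PseudoSampler simply forwards every assigned box: all positives and all negatives survive.

from mmdet.core.bbox import demodata
from mmdet.core.bbox.assigners.assign_result import AssignResult
from mmdet.core.bbox.samplers.pseudo_sampler import PseudoSampler

# Build a random assignment plus matching boxes, then "sample" without subsampling.
assign_result = AssignResult.random(rng=0)
bboxes = demodata.random_boxes(assign_result.num_preds, rng=0)
gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=0)

result = PseudoSampler().sample(assign_result, bboxes, gt_bboxes)
print(result.pos_inds.numel(), result.neg_inds.numel())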
  {
    "path": "mmdet/core/bbox/samplers/random_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .base_sampler import BaseSampler\n\n\n@BBOX_SAMPLERS.register_module()\nclass RandomSampler(BaseSampler):\n    \"\"\"Random sampler.\n\n    Args:\n        num (int): Number of samples\n        pos_fraction (float): Fraction of positive samples\n        neg_pos_ub (int, optional): Upper bound number of negative and\n            positive samples. Defaults to -1.\n        add_gt_as_proposals (bool, optional): Whether to add ground truth\n            boxes as proposals. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 neg_pos_ub=-1,\n                 add_gt_as_proposals=True,\n                 **kwargs):\n        from mmdet.core.bbox import demodata\n        super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub,\n                                            add_gt_as_proposals)\n        self.rng = demodata.ensure_rng(kwargs.get('rng', None))\n\n    def random_choice(self, gallery, num):\n        \"\"\"Random select some elements from the gallery.\n\n        If `gallery` is a Tensor, the returned indices will be a Tensor;\n        If `gallery` is a ndarray or list, the returned indices will be a\n        ndarray.\n\n        Args:\n            gallery (Tensor | ndarray | list): indices pool.\n            num (int): expected sample num.\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        assert len(gallery) >= num\n\n        is_tensor = isinstance(gallery, torch.Tensor)\n        if not is_tensor:\n            if torch.cuda.is_available():\n                device = torch.cuda.current_device()\n            else:\n                device = 'cpu'\n            gallery = torch.tensor(gallery, dtype=torch.long, device=device)\n        # This is a temporary fix. We can revert the following code\n        # when PyTorch fixes the abnormal return of torch.randperm.\n        # See: https://github.com/open-mmlab/mmdetection/pull/5014\n        perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device)\n        rand_inds = gallery[perm]\n        if not is_tensor:\n            rand_inds = rand_inds.cpu().numpy()\n        return rand_inds\n\n    def _sample_pos(self, assign_result, num_expected, **kwargs):\n        \"\"\"Randomly sample some positive samples.\"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)\n        if pos_inds.numel() != 0:\n            pos_inds = pos_inds.squeeze(1)\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.random_choice(pos_inds, num_expected)\n\n    def _sample_neg(self, assign_result, num_expected, **kwargs):\n        \"\"\"Randomly sample some negative samples.\"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)\n        if neg_inds.numel() != 0:\n            neg_inds = neg_inds.squeeze(1)\n        if len(neg_inds) <= num_expected:\n            return neg_inds\n        else:\n            return self.random_choice(neg_inds, num_expected)\n"
  },
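A small sketch of the sampling budget (the numbers are assumptions, and `num_preds`/`num_gts` are passed through to `AssignResult.random` as described in the SamplingResult docstring below): with `num=256` and `pos_fraction=0.5`, at most 128 positives are drawn and the remainder of the budget is filled with randomly chosen negatives, so the combined sample never exceeds `num`.

from mmdet.core.bbox import demodata
from mmdet.core.bbox.assigners.assign_result import AssignResult
from mmdet.core.bbox.samplers.random_sampler import RandomSampler

assign_result = AssignResult.random(num_preds=512, num_gts=8, rng=0)
bboxes = demodata.random_boxes(512, rng=0)
gt_bboxes = demodata.random_boxes(8, rng=0)

sampler = RandomSampler(
    num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False)
result = sampler.sample(assign_result, bboxes, gt_bboxes)
assert result.pos_inds.numel() + result.neg_inds.numel() <= 256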
  {
    "path": "mmdet/core/bbox/samplers/sampling_result.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.utils import util_mixins\n\n\nclass SamplingResult(util_mixins.NiceRepr):\n    \"\"\"Bbox sampling result.\n\n    Example:\n        >>> # xdoctest: +IGNORE_WANT\n        >>> from mmdet.core.bbox.samplers.sampling_result import *  # NOQA\n        >>> self = SamplingResult.random(rng=10)\n        >>> print(f'self = {self}')\n        self = <SamplingResult({\n            'neg_bboxes': torch.Size([12, 4]),\n            'neg_inds': tensor([ 0,  1,  2,  4,  5,  6,  7,  8,  9, 10, 11, 12]),\n            'num_gts': 4,\n            'pos_assigned_gt_inds': tensor([], dtype=torch.int64),\n            'pos_bboxes': torch.Size([0, 4]),\n            'pos_inds': tensor([], dtype=torch.int64),\n            'pos_is_gt': tensor([], dtype=torch.uint8)\n        })>\n    \"\"\"\n\n    def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,\n                 gt_flags):\n        self.pos_inds = pos_inds\n        self.neg_inds = neg_inds\n        self.pos_bboxes = bboxes[pos_inds]\n        self.neg_bboxes = bboxes[neg_inds]\n        self.pos_is_gt = gt_flags[pos_inds]\n\n        self.num_gts = gt_bboxes.shape[0]\n        self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n\n        if gt_bboxes.numel() == 0:\n            # hack for index error case\n            assert self.pos_assigned_gt_inds.numel() == 0\n            self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)\n        else:\n            if len(gt_bboxes.shape) < 2:\n                gt_bboxes = gt_bboxes.view(-1, 4)\n\n            self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :]\n\n        if assign_result.labels is not None:\n            self.pos_gt_labels = assign_result.labels[pos_inds]\n        else:\n            self.pos_gt_labels = None\n\n    @property\n    def bboxes(self):\n        \"\"\"torch.Tensor: concatenated positive and negative boxes\"\"\"\n        return torch.cat([self.pos_bboxes, self.neg_bboxes])\n\n    def to(self, device):\n        \"\"\"Change the device of the data inplace.\n\n        Example:\n            >>> self = SamplingResult.random()\n            >>> print(f'self = {self.to(None)}')\n            >>> # xdoctest: +REQUIRES(--gpu)\n            >>> print(f'self = {self.to(0)}')\n        \"\"\"\n        _dict = self.__dict__\n        for key, value in _dict.items():\n            if isinstance(value, torch.Tensor):\n                _dict[key] = value.to(device)\n        return self\n\n    def __nice__(self):\n        data = self.info.copy()\n        data['pos_bboxes'] = data.pop('pos_bboxes').shape\n        data['neg_bboxes'] = data.pop('neg_bboxes').shape\n        parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n        body = '    ' + ',\\n    '.join(parts)\n        return '{\\n' + body + '\\n}'\n\n    @property\n    def info(self):\n        \"\"\"Returns a dictionary of info about the object.\"\"\"\n        return {\n            'pos_inds': self.pos_inds,\n            'neg_inds': self.neg_inds,\n            'pos_bboxes': self.pos_bboxes,\n            'neg_bboxes': self.neg_bboxes,\n            'pos_is_gt': self.pos_is_gt,\n            'num_gts': self.num_gts,\n            'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n        }\n\n    @classmethod\n    def random(cls, rng=None, **kwargs):\n        \"\"\"\n        Args:\n            rng (None | int | numpy.random.RandomState): seed or state.\n            kwargs (keyword arguments):\n                - num_preds: 
number of predicted boxes\n                - num_gts: number of true boxes\n                - p_ignore (float): probability of a predicted box assigned to \\\n                    an ignored truth.\n                - p_assigned (float): probability of a predicted box not being \\\n                    assigned.\n                - p_use_label (float | bool): with labels or not.\n\n        Returns:\n            :obj:`SamplingResult`: Randomly generated sampling result.\n\n        Example:\n            >>> from mmdet.core.bbox.samplers.sampling_result import *  # NOQA\n            >>> self = SamplingResult.random()\n            >>> print(self.__dict__)\n        \"\"\"\n        from mmdet.core.bbox import demodata\n        from mmdet.core.bbox.assigners.assign_result import AssignResult\n        from mmdet.core.bbox.samplers.random_sampler import RandomSampler\n        rng = demodata.ensure_rng(rng)\n\n        # make probabilistic?\n        num = 32\n        pos_fraction = 0.5\n        neg_pos_ub = -1\n\n        assign_result = AssignResult.random(rng=rng, **kwargs)\n\n        # Note we could just compute an assignment\n        bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng)\n        gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng)\n\n        if rng.rand() > 0.2:\n            # sometimes algorithms squeeze their data, be robust to that\n            gt_bboxes = gt_bboxes.squeeze()\n            bboxes = bboxes.squeeze()\n\n        if assign_result.labels is None:\n            gt_labels = None\n        else:\n            gt_labels = None  # todo\n\n        if gt_labels is None:\n            add_gt_as_proposals = False\n        else:\n            add_gt_as_proposals = True  # make probabilistic?\n\n        sampler = RandomSampler(\n            num,\n            pos_fraction,\n            neg_pos_ub=neg_pos_ub,\n            add_gt_as_proposals=add_gt_as_proposals,\n            rng=rng)\n        self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)\n        return self\n"
  },
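For orientation, a short sketch of the fields a SamplingResult exposes, built with the class's own `random()` helper; it stays within what the docstring and the `info` property above already list.

from mmdet.core.bbox.samplers.sampling_result import SamplingResult

res = SamplingResult.random(rng=0)
# `bboxes` concatenates positives first, then negatives.
assert res.bboxes.shape[0] == res.pos_inds.numel() + res.neg_inds.numel()
# Each positive keeps the ground-truth box it was assigned to.
assert res.pos_gt_bboxes.shape[0] == res.pos_inds.numel()
print(res.info.keys())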
  {
    "path": "mmdet/core/bbox/samplers/score_hlr_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import nms_match\n\nfrom ..builder import BBOX_SAMPLERS\nfrom ..transforms import bbox2roi\nfrom .base_sampler import BaseSampler\nfrom .sampling_result import SamplingResult\n\n\n@BBOX_SAMPLERS.register_module()\nclass ScoreHLRSampler(BaseSampler):\n    r\"\"\"Importance-based Sample Reweighting (ISR_N), described in `Prime Sample\n    Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_.\n\n    Score hierarchical local rank (HLR) differentiates with RandomSampler in\n    negative part. It firstly computes Score-HLR in a two-step way,\n    then linearly maps score hlr to the loss weights.\n\n    Args:\n        num (int): Total number of sampled RoIs.\n        pos_fraction (float): Fraction of positive samples.\n        context (:class:`BaseRoIHead`): RoI head that the sampler belongs to.\n        neg_pos_ub (int): Upper bound of the ratio of num negative to num\n            positive, -1 means no upper bound.\n        add_gt_as_proposals (bool): Whether to add ground truth as proposals.\n        k (float): Power of the non-linear mapping.\n        bias (float): Shift of the non-linear mapping.\n        score_thr (float): Minimum score that a negative sample is to be\n            considered as valid bbox.\n    \"\"\"\n\n    def __init__(self,\n                 num,\n                 pos_fraction,\n                 context,\n                 neg_pos_ub=-1,\n                 add_gt_as_proposals=True,\n                 k=0.5,\n                 bias=0,\n                 score_thr=0.05,\n                 iou_thr=0.5,\n                 **kwargs):\n        super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals)\n        self.k = k\n        self.bias = bias\n        self.score_thr = score_thr\n        self.iou_thr = iou_thr\n        self.context = context\n        # context of cascade detectors is a list, so distinguish them here.\n        if not hasattr(context, 'num_stages'):\n            self.bbox_roi_extractor = context.bbox_roi_extractor\n            self.bbox_head = context.bbox_head\n            self.with_shared_head = context.with_shared_head\n            if self.with_shared_head:\n                self.shared_head = context.shared_head\n        else:\n            self.bbox_roi_extractor = context.bbox_roi_extractor[\n                context.current_stage]\n            self.bbox_head = context.bbox_head[context.current_stage]\n\n    @staticmethod\n    def random_choice(gallery, num):\n        \"\"\"Randomly select some elements from the gallery.\n\n        If `gallery` is a Tensor, the returned indices will be a Tensor;\n        If `gallery` is a ndarray or list, the returned indices will be a\n        ndarray.\n\n        Args:\n            gallery (Tensor | ndarray | list): indices pool.\n            num (int): expected sample num.\n\n        Returns:\n            Tensor or ndarray: sampled indices.\n        \"\"\"\n        assert len(gallery) >= num\n\n        is_tensor = isinstance(gallery, torch.Tensor)\n        if not is_tensor:\n            if torch.cuda.is_available():\n                device = torch.cuda.current_device()\n            else:\n                device = 'cpu'\n            gallery = torch.tensor(gallery, dtype=torch.long, device=device)\n        perm = torch.randperm(gallery.numel(), device=gallery.device)[:num]\n        rand_inds = gallery[perm]\n        if not is_tensor:\n            rand_inds = rand_inds.cpu().numpy()\n        return rand_inds\n\n    
def _sample_pos(self, assign_result, num_expected, **kwargs):\n        \"\"\"Randomly sample some positive samples.\"\"\"\n        pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten()\n        if pos_inds.numel() <= num_expected:\n            return pos_inds\n        else:\n            return self.random_choice(pos_inds, num_expected)\n\n    def _sample_neg(self,\n                    assign_result,\n                    num_expected,\n                    bboxes,\n                    feats=None,\n                    img_meta=None,\n                    **kwargs):\n        \"\"\"Sample negative samples.\n\n        Score-HLR sampler is done in the following steps:\n        1. Take the maximum positive score prediction of each negative samples\n            as s_i.\n        2. Filter out negative samples whose s_i <= score_thr, the left samples\n            are called valid samples.\n        3. Use NMS-Match to divide valid samples into different groups,\n            samples in the same group will greatly overlap with each other\n        4. Rank the matched samples in two-steps to get Score-HLR.\n            (1) In the same group, rank samples with their scores.\n            (2) In the same score rank across different groups,\n                rank samples with their scores again.\n        5. Linearly map Score-HLR to the final label weights.\n\n        Args:\n            assign_result (:obj:`AssignResult`): result of assigner.\n            num_expected (int): Expected number of samples.\n            bboxes (Tensor): bbox to be sampled.\n            feats (Tensor): Features come from FPN.\n            img_meta (dict): Meta information dictionary.\n        \"\"\"\n        neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()\n        num_neg = neg_inds.size(0)\n        if num_neg == 0:\n            return neg_inds, None\n        with torch.no_grad():\n            neg_bboxes = bboxes[neg_inds]\n            neg_rois = bbox2roi([neg_bboxes])\n            bbox_result = self.context._bbox_forward(feats, neg_rois)\n            cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[\n                'bbox_pred']\n\n            ori_loss = self.bbox_head.loss(\n                cls_score=cls_score,\n                bbox_pred=None,\n                rois=None,\n                labels=neg_inds.new_full((num_neg, ),\n                                         self.bbox_head.num_classes),\n                label_weights=cls_score.new_ones(num_neg),\n                bbox_targets=None,\n                bbox_weights=None,\n                reduction_override='none')['loss_cls']\n\n            # filter out samples with the max score lower than score_thr\n            max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)\n            valid_inds = (max_score > self.score_thr).nonzero().view(-1)\n            invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)\n            num_valid = valid_inds.size(0)\n            num_invalid = invalid_inds.size(0)\n\n            num_expected = min(num_neg, num_expected)\n            num_hlr = min(num_valid, num_expected)\n            num_rand = num_expected - num_hlr\n            if num_valid > 0:\n                valid_rois = neg_rois[valid_inds]\n                valid_max_score = max_score[valid_inds]\n                valid_argmax_score = argmax_score[valid_inds]\n                valid_bbox_pred = bbox_pred[valid_inds]\n\n                # valid_bbox_pred shape: [num_valid, #num_classes, 4]\n                valid_bbox_pred = 
valid_bbox_pred.view(\n                    valid_bbox_pred.size(0), -1, 4)\n                selected_bbox_pred = valid_bbox_pred[range(num_valid),\n                                                     valid_argmax_score]\n                pred_bboxes = self.bbox_head.bbox_coder.decode(\n                    valid_rois[:, 1:], selected_bbox_pred)\n                pred_bboxes_with_score = torch.cat(\n                    [pred_bboxes, valid_max_score[:, None]], -1)\n                group = nms_match(pred_bboxes_with_score, self.iou_thr)\n\n                # imp: importance\n                imp = cls_score.new_zeros(num_valid)\n                for g in group:\n                    g_score = valid_max_score[g]\n                    # g_score has already sorted\n                    rank = g_score.new_tensor(range(g_score.size(0)))\n                    imp[g] = num_valid - rank + g_score\n                _, imp_rank_inds = imp.sort(descending=True)\n                _, imp_rank = imp_rank_inds.sort()\n                hlr_inds = imp_rank_inds[:num_expected]\n\n                if num_rand > 0:\n                    rand_inds = torch.randperm(num_invalid)[:num_rand]\n                    select_inds = torch.cat(\n                        [valid_inds[hlr_inds], invalid_inds[rand_inds]])\n                else:\n                    select_inds = valid_inds[hlr_inds]\n\n                neg_label_weights = cls_score.new_ones(num_expected)\n\n                up_bound = max(num_expected, num_valid)\n                imp_weights = (up_bound -\n                               imp_rank[hlr_inds].float()) / up_bound\n                neg_label_weights[:num_hlr] = imp_weights\n                neg_label_weights[num_hlr:] = imp_weights.min()\n                neg_label_weights = (self.bias +\n                                     (1 - self.bias) * neg_label_weights).pow(\n                                         self.k)\n                ori_selected_loss = ori_loss[select_inds]\n                new_loss = ori_selected_loss * neg_label_weights\n                norm_ratio = ori_selected_loss.sum() / new_loss.sum()\n                neg_label_weights *= norm_ratio\n            else:\n                neg_label_weights = cls_score.new_ones(num_expected)\n                select_inds = torch.randperm(num_neg)[:num_expected]\n\n            return neg_inds[select_inds], neg_label_weights\n\n    def sample(self,\n               assign_result,\n               bboxes,\n               gt_bboxes,\n               gt_labels=None,\n               img_meta=None,\n               **kwargs):\n        \"\"\"Sample positive and negative bboxes.\n\n        This is a simple implementation of bbox sampling given candidates,\n        assigning results and ground truth bboxes.\n\n        Args:\n            assign_result (:obj:`AssignResult`): Bbox assigning results.\n            bboxes (Tensor): Boxes to be sampled from.\n            gt_bboxes (Tensor): Ground truth bboxes.\n            gt_labels (Tensor, optional): Class labels of ground truth bboxes.\n\n        Returns:\n            tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative\n                label weights.\n        \"\"\"\n        bboxes = bboxes[:, :4]\n\n        gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8)\n        if self.add_gt_as_proposals:\n            bboxes = torch.cat([gt_bboxes, bboxes], dim=0)\n            assign_result.add_gt_(gt_labels)\n            gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8)\n            gt_flags = 
torch.cat([gt_ones, gt_flags])\n\n        num_expected_pos = int(self.num * self.pos_fraction)\n        pos_inds = self.pos_sampler._sample_pos(\n            assign_result, num_expected_pos, bboxes=bboxes, **kwargs)\n        num_sampled_pos = pos_inds.numel()\n        num_expected_neg = self.num - num_sampled_pos\n        if self.neg_pos_ub >= 0:\n            _pos = max(1, num_sampled_pos)\n            neg_upper_bound = int(self.neg_pos_ub * _pos)\n            if num_expected_neg > neg_upper_bound:\n                num_expected_neg = neg_upper_bound\n        neg_inds, neg_label_weights = self.neg_sampler._sample_neg(\n            assign_result,\n            num_expected_neg,\n            bboxes,\n            img_meta=img_meta,\n            **kwargs)\n\n        return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes,\n                              assign_result, gt_flags), neg_label_weights\n"
  },
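A hedged configuration sketch (the values are assumptions, not extracted from this file): ScoreHLRSampler is chosen by name like the other samplers, the RoI head supplies `context` when building it, and, unlike the samplers above, its `sample()` returns a second value with per-negative label weights that the consuming head must feed into its classification loss.

# Illustrative sampler config (assumed values).
score_hlr_sampler_cfg = dict(
    type='ScoreHLRSampler',
    num=512,
    pos_fraction=0.25,
    neg_pos_ub=-1,
    add_gt_as_proposals=True,
    k=0.5,
    bias=0.,
    score_thr=0.05,
    iou_thr=0.5)

# Sketch of how the two return values are unpacked by the head that uses it:
# sampling_result, neg_label_weights = sampler.sample(
#     assign_result, proposals, gt_bboxes, gt_labels, img_meta=img_meta)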
  {
    "path": "mmdet/core/bbox/transforms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\n\ndef find_inside_bboxes(bboxes, img_h, img_w):\n    \"\"\"Find bboxes as long as a part of bboxes is inside the image.\n\n    Args:\n        bboxes (Tensor): Shape (N, 4).\n        img_h (int): Image height.\n        img_w (int): Image width.\n\n    Returns:\n        Tensor: Index of the remaining bboxes.\n    \"\"\"\n    inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \\\n        & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0)\n    return inside_inds\n\n\ndef bbox_flip(bboxes, img_shape, direction='horizontal'):\n    \"\"\"Flip bboxes horizontally or vertically.\n\n    Args:\n        bboxes (Tensor): Shape (..., 4*k)\n        img_shape (tuple): Image shape.\n        direction (str): Flip direction, options are \"horizontal\", \"vertical\",\n            \"diagonal\". Default: \"horizontal\"\n\n    Returns:\n        Tensor: Flipped bboxes.\n    \"\"\"\n    assert bboxes.shape[-1] % 4 == 0\n    assert direction in ['horizontal', 'vertical', 'diagonal']\n    flipped = bboxes.clone()\n    if direction == 'horizontal':\n        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]\n        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]\n    elif direction == 'vertical':\n        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]\n        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]\n    else:\n        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]\n        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]\n        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]\n        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]\n    return flipped\n\n\ndef bbox_mapping(bboxes,\n                 img_shape,\n                 scale_factor,\n                 flip,\n                 flip_direction='horizontal'):\n    \"\"\"Map bboxes from the original image scale to testing scale.\"\"\"\n    new_bboxes = bboxes * bboxes.new_tensor(scale_factor)\n    if flip:\n        new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)\n    return new_bboxes\n\n\ndef bbox_mapping_back(bboxes,\n                      img_shape,\n                      scale_factor,\n                      flip,\n                      flip_direction='horizontal'):\n    \"\"\"Map bboxes from testing scale to original image scale.\"\"\"\n    new_bboxes = bbox_flip(bboxes, img_shape,\n                           flip_direction) if flip else bboxes\n    new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)\n    return new_bboxes.view(bboxes.shape)\n\n\ndef bbox2roi(bbox_list):\n    \"\"\"Convert a list of bboxes to roi format.\n\n    Args:\n        bbox_list (list[Tensor]): a list of bboxes corresponding to a batch\n            of images.\n\n    Returns:\n        Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]\n    \"\"\"\n    rois_list = []\n    for img_id, bboxes in enumerate(bbox_list):\n        if bboxes.size(0) > 0:\n            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)\n            rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)\n        else:\n            rois = bboxes.new_zeros((0, 5))\n        rois_list.append(rois)\n    rois = torch.cat(rois_list, 0)\n    return rois\n\n\ndef roi2bbox(rois):\n    \"\"\"Convert rois to bounding box format.\n\n    Args:\n        rois (torch.Tensor): RoIs with the shape (n, 5) where the first\n            column indicates batch id of each RoI.\n\n    Returns:\n        list[torch.Tensor]: Converted boxes of 
corresponding rois.\n    \"\"\"\n    bbox_list = []\n    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)\n    for img_id in img_ids:\n        inds = (rois[:, 0] == img_id.item())\n        bbox = rois[inds, 1:]\n        bbox_list.append(bbox)\n    return bbox_list\n\n\ndef bbox2result(bboxes, labels, num_classes):\n    \"\"\"Convert detection results to a list of numpy arrays.\n\n    Args:\n        bboxes (torch.Tensor | np.ndarray): shape (n, 5)\n        labels (torch.Tensor | np.ndarray): shape (n, )\n        num_classes (int): class number, including background class\n\n    Returns:\n        list(ndarray): bbox results of each class\n    \"\"\"\n    if bboxes.shape[0] == 0:\n        return [np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)]\n    else:\n        if isinstance(bboxes, torch.Tensor):\n            bboxes = bboxes.detach().cpu().numpy()\n            labels = labels.detach().cpu().numpy()\n        return [bboxes[labels == i, :] for i in range(num_classes)]\n\n\ndef distance2bbox(points, distance, max_shape=None):\n    \"\"\"Decode distance prediction to bounding box.\n\n    Args:\n        points (Tensor): Shape (B, N, 2) or (N, 2).\n        distance (Tensor): Distance from the given point to 4\n            boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)\n        max_shape (Sequence[int] or torch.Tensor or Sequence[\n            Sequence[int]],optional): Maximum bounds for boxes, specifies\n            (H, W, C) or (H, W). If priors shape is (B, N, 4), then\n            the max_shape should be a Sequence[Sequence[int]]\n            and the length of max_shape should also be B.\n\n    Returns:\n        Tensor: Boxes with shape (N, 4) or (B, N, 4)\n    \"\"\"\n\n    x1 = points[..., 0] - distance[..., 0]\n    y1 = points[..., 1] - distance[..., 1]\n    x2 = points[..., 0] + distance[..., 2]\n    y2 = points[..., 1] + distance[..., 3]\n\n    bboxes = torch.stack([x1, y1, x2, y2], -1)\n\n    if max_shape is not None:\n        if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():\n            # speed up\n            bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])\n            bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])\n            return bboxes\n\n        # clip bboxes with dynamic `min` and `max` for onnx\n        if torch.onnx.is_in_onnx_export():\n            from mmdet.core.export import dynamic_clip_for_onnx\n            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n            bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n            return bboxes\n        if not isinstance(max_shape, torch.Tensor):\n            max_shape = x1.new_tensor(max_shape)\n        max_shape = max_shape[..., :2].type_as(x1)\n        if max_shape.ndim == 2:\n            assert bboxes.ndim == 3\n            assert max_shape.size(0) == bboxes.size(0)\n\n        min_xy = x1.new_tensor(0)\n        max_xy = torch.cat([max_shape, max_shape],\n                           dim=-1).flip(-1).unsqueeze(-2)\n        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n    return bboxes\n\n\ndef bbox2distance(points, bbox, max_dis=None, eps=0.1):\n    \"\"\"Decode bounding box based on distances.\n\n    Args:\n        points (Tensor): Shape (n, 2), [x, y].\n        bbox (Tensor): Shape (n, 4), \"xyxy\" format\n        max_dis (float): Upper bound of the distance.\n        eps (float): a small value to ensure target < max_dis, instead <=\n\n    Returns:\n        Tensor: Decoded 
distances.\n    \"\"\"\n    left = points[:, 0] - bbox[:, 0]\n    top = points[:, 1] - bbox[:, 1]\n    right = bbox[:, 2] - points[:, 0]\n    bottom = bbox[:, 3] - points[:, 1]\n    if max_dis is not None:\n        left = left.clamp(min=0, max=max_dis - eps)\n        top = top.clamp(min=0, max=max_dis - eps)\n        right = right.clamp(min=0, max=max_dis - eps)\n        bottom = bottom.clamp(min=0, max=max_dis - eps)\n    return torch.stack([left, top, right, bottom], -1)\n\n\ndef bbox_rescale(bboxes, scale_factor=1.0):\n    \"\"\"Rescale bounding box w.r.t. scale_factor.\n\n    Args:\n        bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois\n        scale_factor (float): rescale factor\n\n    Returns:\n        Tensor: Rescaled bboxes.\n    \"\"\"\n    if bboxes.size(1) == 5:\n        bboxes_ = bboxes[:, 1:]\n        inds_ = bboxes[:, 0]\n    else:\n        bboxes_ = bboxes\n    cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5\n    cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5\n    w = bboxes_[:, 2] - bboxes_[:, 0]\n    h = bboxes_[:, 3] - bboxes_[:, 1]\n    w = w * scale_factor\n    h = h * scale_factor\n    x1 = cx - 0.5 * w\n    x2 = cx + 0.5 * w\n    y1 = cy - 0.5 * h\n    y2 = cy + 0.5 * h\n    if bboxes.size(1) == 5:\n        rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)\n    else:\n        rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n    return rescaled_bboxes\n\n\ndef bbox_cxcywh_to_xyxy(bbox):\n    \"\"\"Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).\n\n    Args:\n        bbox (Tensor): Shape (n, 4) for bboxes.\n\n    Returns:\n        Tensor: Converted bboxes.\n    \"\"\"\n    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)\n    bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]\n    return torch.cat(bbox_new, dim=-1)\n\n\ndef bbox_xyxy_to_cxcywh(bbox):\n    \"\"\"Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).\n\n    Args:\n        bbox (Tensor): Shape (n, 4) for bboxes.\n\n    Returns:\n        Tensor: Converted bboxes.\n    \"\"\"\n    x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)\n    bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]\n    return torch.cat(bbox_new, dim=-1)\n"
  },
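A small, self-contained sketch exercising a few of the helpers above; the box coordinates are arbitrary.

import torch

from mmdet.core.bbox.transforms import (bbox2roi, bbox_cxcywh_to_xyxy,
                                        bbox_xyxy_to_cxcywh, roi2bbox)

# bbox2roi prepends the image index as column 0 so RoI ops can mix images.
boxes_img0 = torch.tensor([[0., 0., 10., 10.], [5., 5., 20., 30.]])
boxes_img1 = torch.tensor([[2., 3., 4., 8.]])
rois = bbox2roi([boxes_img0, boxes_img1])
assert rois.shape == (3, 5) and int(rois[2, 0]) == 1

# roi2bbox splits the RoIs back into one tensor per image index.
assert len(roi2bbox(rois)) == 2

# The center-size and corner conversions invert each other.
xyxy = torch.tensor([[0., 0., 10., 20.]])
assert torch.allclose(bbox_cxcywh_to_xyxy(bbox_xyxy_to_cxcywh(xyxy)), xyxy)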
  {
    "path": "mmdet/core/data_structures/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .general_data import GeneralData\nfrom .instance_data import InstanceData\n\n__all__ = ['GeneralData', 'InstanceData']\n"
  },
  {
    "path": "mmdet/core/data_structures/general_data.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport numpy as np\nimport torch\n\nfrom mmdet.utils.util_mixins import NiceRepr\n\n\nclass GeneralData(NiceRepr):\n    \"\"\"A general data structure of OpenMMlab.\n\n    A data structure that stores the meta information,\n    the annotations of the images or the model predictions,\n    which can be used in communication between components.\n\n    The attributes in `GeneralData` are divided into two parts,\n    the `meta_info_fields` and the `data_fields` respectively.\n\n        - `meta_info_fields`: Usually contains the\n          information about the image such as filename,\n          image_shape, pad_shape, etc. All attributes in\n          it are immutable once set,\n          but the user can add new meta information with\n          `set_meta_info` function, all information can be accessed\n          with methods `meta_info_keys`, `meta_info_values`,\n          `meta_info_items`.\n\n        - `data_fields`: Annotations or model predictions are\n          stored. The attributes can be accessed or modified by\n          dict-like or object-like operations, such as\n          `.` , `[]`, `in`, `del`, `pop(str)` `get(str)`, `keys()`,\n          `values()`, `items()`. Users can also apply tensor-like methods\n          to all obj:`torch.Tensor` in the `data_fileds`,\n          such as `.cuda()`, `.cpu()`, `.numpy()`, `device`, `.to()`\n          `.detach()`, `.numpy()`\n\n    Args:\n        meta_info (dict, optional): A dict contains the meta information\n            of single image. such as `img_shape`, `scale_factor`, etc.\n            Default: None.\n        data (dict, optional): A dict contains annotations of single image or\n            model predictions. Default: None.\n\n    Examples:\n        >>> from mmdet.core import GeneralData\n        >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))\n        >>> instance_data = GeneralData(meta_info=img_meta)\n        >>> img_shape in instance_data\n        True\n        >>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3])\n        >>> instance_data[\"det_scores\"] = torch.Tensor([0.01, 0.1, 0.2, 0.3])\n        >>> print(results)\n        <GeneralData(\n\n          META INFORMATION\n        img_shape: (800, 1196, 3)\n        pad_shape: (800, 1216, 3)\n\n          DATA FIELDS\n        shape of det_labels: torch.Size([4])\n        shape of det_scores: torch.Size([4])\n\n        ) at 0x7f84acd10f90>\n        >>> instance_data.det_scores\n        tensor([0.0100, 0.1000, 0.2000, 0.3000])\n        >>> instance_data.det_labels\n        tensor([0, 1, 2, 3])\n        >>> instance_data['det_labels']\n        tensor([0, 1, 2, 3])\n        >>> 'det_labels' in instance_data\n        True\n        >>> instance_data.img_shape\n        (800, 1196, 3)\n        >>> 'det_scores' in instance_data\n        True\n        >>> del instance_data.det_scores\n        >>> 'det_scores' in instance_data\n        False\n        >>> det_labels = instance_data.pop('det_labels', None)\n        >>> det_labels\n        tensor([0, 1, 2, 3])\n        >>> 'det_labels' in instance_data\n        >>> False\n    \"\"\"\n\n    def __init__(self, meta_info=None, data=None):\n\n        self._meta_info_fields = set()\n        self._data_fields = set()\n\n        if meta_info is not None:\n            self.set_meta_info(meta_info=meta_info)\n        if data is not None:\n            self.set_data(data)\n\n    def set_meta_info(self, meta_info):\n        \"\"\"Add meta 
information.\n\n        Args:\n            meta_info (dict): A dict contains the meta information\n                of image. such as `img_shape`, `scale_factor`, etc.\n                Default: None.\n        \"\"\"\n        assert isinstance(meta_info,\n                          dict), f'meta should be a `dict` but get {meta_info}'\n        meta = copy.deepcopy(meta_info)\n        for k, v in meta.items():\n            # should be consistent with original meta_info\n            if k in self._meta_info_fields:\n                ori_value = getattr(self, k)\n                if isinstance(ori_value, (torch.Tensor, np.ndarray)):\n                    if (ori_value == v).all():\n                        continue\n                    else:\n                        raise KeyError(\n                            f'img_meta_info {k} has been set as '\n                            f'{getattr(self, k)} before, which is immutable ')\n                elif ori_value == v:\n                    continue\n                else:\n                    raise KeyError(\n                        f'img_meta_info {k} has been set as '\n                        f'{getattr(self, k)} before, which is immutable ')\n            else:\n                self._meta_info_fields.add(k)\n                self.__dict__[k] = v\n\n    def set_data(self, data):\n        \"\"\"Update a dict to `data_fields`.\n\n        Args:\n            data (dict): A dict contains annotations of image or\n                model predictions. Default: None.\n        \"\"\"\n        assert isinstance(data,\n                          dict), f'meta should be a `dict` but get {data}'\n        for k, v in data.items():\n            self.__setattr__(k, v)\n\n    def new(self, meta_info=None, data=None):\n        \"\"\"Return a new results with same image meta information.\n\n        Args:\n            meta_info (dict, optional): A dict contains the meta information\n                of image. such as `img_shape`, `scale_factor`, etc.\n                Default: None.\n            data (dict, optional): A dict contains annotations of image or\n                model predictions. 
Default: None.\n        \"\"\"\n        new_data = self.__class__()\n        new_data.set_meta_info(dict(self.meta_info_items()))\n        if meta_info is not None:\n            new_data.set_meta_info(meta_info)\n        if data is not None:\n            new_data.set_data(data)\n        return new_data\n\n    def keys(self):\n        \"\"\"\n        Returns:\n            list: Contains all keys in data_fields.\n        \"\"\"\n        return [key for key in self._data_fields]\n\n    def meta_info_keys(self):\n        \"\"\"\n        Returns:\n            list: Contains all keys in meta_info_fields.\n        \"\"\"\n        return [key for key in self._meta_info_fields]\n\n    def values(self):\n        \"\"\"\n        Returns:\n            list: Contains all values in data_fields.\n        \"\"\"\n        return [getattr(self, k) for k in self.keys()]\n\n    def meta_info_values(self):\n        \"\"\"\n        Returns:\n            list: Contains all values in meta_info_fields.\n        \"\"\"\n        return [getattr(self, k) for k in self.meta_info_keys()]\n\n    def items(self):\n        for k in self.keys():\n            yield (k, getattr(self, k))\n\n    def meta_info_items(self):\n        for k in self.meta_info_keys():\n            yield (k, getattr(self, k))\n\n    def __setattr__(self, name, val):\n        if name in ('_meta_info_fields', '_data_fields'):\n            if not hasattr(self, name):\n                super().__setattr__(name, val)\n            else:\n                raise AttributeError(\n                    f'{name} has been used as a '\n                    f'private attribute, which is immutable. ')\n        else:\n            if name in self._meta_info_fields:\n                raise AttributeError(f'`{name}` is used in meta information,'\n                                     f'which is immutable')\n\n            self._data_fields.add(name)\n            super().__setattr__(name, val)\n\n    def __delattr__(self, item):\n\n        if item in ('_meta_info_fields', '_data_fields'):\n            raise AttributeError(f'{item} has been used as a '\n                                 f'private attribute, which is immutable. 
')\n\n        if item in self._meta_info_fields:\n            raise KeyError(f'{item} is used in meta information, '\n                           f'which is immutable.')\n        super().__delattr__(item)\n        if item in self._data_fields:\n            self._data_fields.remove(item)\n\n    # dict-like methods\n    __setitem__ = __setattr__\n    __delitem__ = __delattr__\n\n    def __getitem__(self, name):\n        return getattr(self, name)\n\n    def get(self, *args):\n        assert len(args) < 3, '`get` get more than 2 arguments'\n        return self.__dict__.get(*args)\n\n    def pop(self, *args):\n        assert len(args) < 3, '`pop` get more than 2 arguments'\n        name = args[0]\n        if name in self._meta_info_fields:\n            raise KeyError(f'{name} is a key in meta information, '\n                           f'which is immutable')\n\n        if args[0] in self._data_fields:\n            self._data_fields.remove(args[0])\n            return self.__dict__.pop(*args)\n\n        # with default value\n        elif len(args) == 2:\n            return args[1]\n        else:\n            raise KeyError(f'{args[0]}')\n\n    def __contains__(self, item):\n        return item in self._data_fields or \\\n                    item in self._meta_info_fields\n\n    # Tensor-like methods\n    def to(self, *args, **kwargs):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if hasattr(v, 'to'):\n                v = v.to(*args, **kwargs)\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def cpu(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.cpu()\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def npu(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.npu()\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def mlu(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.mlu()\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def cuda(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.cuda()\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def detach(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.detach()\n            new_data[k] = v\n        return new_data\n\n    # Tensor-like methods\n    def numpy(self):\n        \"\"\"Apply same name function to all tensors in data_fields.\"\"\"\n        new_data = self.new()\n        for k, v in self.items():\n            if isinstance(v, torch.Tensor):\n                v = v.detach().cpu().numpy()\n            new_data[k] = v\n        
return new_data\n\n    def __nice__(self):\n        repr = '\\n \\n  META INFORMATION \\n'\n        for k, v in self.meta_info_items():\n            repr += f'{k}: {v} \\n'\n        repr += '\\n   DATA FIELDS \\n'\n        for k, v in self.items():\n            if isinstance(v, (torch.Tensor, np.ndarray)):\n                repr += f'shape of {k}: {v.shape} \\n'\n            else:\n                repr += f'{k}: {v} \\n'\n        return repr + '\\n'\n"
  },
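A short sketch, following the class's own docstring examples, of the two-part contract: meta info is write-once, while data fields travel with the tensor-like helpers.

import torch

from mmdet.core import GeneralData

data = GeneralData(meta_info=dict(img_shape=(800, 1196, 3)))
data.det_scores = torch.rand(4)   # stored in data_fields
cpu_copy = data.cpu()             # tensors copied to CPU, meta info kept as-is

# Re-setting existing meta info to a different value raises, as documented above.
try:
    data.set_meta_info(dict(img_shape=(600, 800, 3)))
except KeyError:
    print('meta info is immutable once set')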
  {
    "path": "mmdet/core/data_structures/instance_data.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\n\nimport numpy as np\nimport torch\n\nfrom .general_data import GeneralData\n\n\nclass InstanceData(GeneralData):\n    \"\"\"Data structure for instance-level annnotations or predictions.\n\n    Subclass of :class:`GeneralData`. All value in `data_fields`\n    should have the same length. This design refer to\n    https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501\n\n    Examples:\n        >>> from mmdet.core import InstanceData\n        >>> import numpy as np\n        >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3))\n        >>> results = InstanceData(img_meta)\n        >>> img_shape in results\n        True\n        >>> results.det_labels = torch.LongTensor([0, 1, 2, 3])\n        >>> results[\"det_scores\"] = torch.Tensor([0.01, 0.7, 0.6, 0.3])\n        >>> results[\"det_masks\"] = np.ndarray(4, 2, 2)\n        >>> len(results)\n        4\n        >>> print(resutls)\n        <InstanceData(\n\n            META INFORMATION\n        pad_shape: (800, 1216, 3)\n        img_shape: (800, 1196, 3)\n\n            PREDICTIONS\n        shape of det_labels: torch.Size([4])\n        shape of det_masks: (4, 2, 2)\n        shape of det_scores: torch.Size([4])\n\n        ) at 0x7fe26b5ca990>\n        >>> sorted_results = results[results.det_scores.sort().indices]\n        >>> sorted_results.det_scores\n        tensor([0.0100, 0.3000, 0.6000, 0.7000])\n        >>> sorted_results.det_labels\n        tensor([0, 3, 2, 1])\n        >>> print(results[results.scores > 0.5])\n        <InstanceData(\n\n            META INFORMATION\n        pad_shape: (800, 1216, 3)\n        img_shape: (800, 1196, 3)\n\n            PREDICTIONS\n        shape of det_labels: torch.Size([2])\n        shape of det_masks: (2, 2, 2)\n        shape of det_scores: torch.Size([2])\n\n        ) at 0x7fe26b6d7790>\n        >>> results[results.det_scores > 0.5].det_labels\n        tensor([1, 2])\n        >>> results[results.det_scores > 0.5].det_scores\n        tensor([0.7000, 0.6000])\n    \"\"\"\n\n    def __setattr__(self, name, value):\n\n        if name in ('_meta_info_fields', '_data_fields'):\n            if not hasattr(self, name):\n                super().__setattr__(name, value)\n            else:\n                raise AttributeError(\n                    f'{name} has been used as a '\n                    f'private attribute, which is immutable. 
')\n\n        else:\n            assert isinstance(value, (torch.Tensor, np.ndarray, list)), \\\n                f'Can set {type(value)}, only support' \\\n                f' {(torch.Tensor, np.ndarray, list)}'\n\n            if self._data_fields:\n                assert len(value) == len(self), f'the length of ' \\\n                                             f'values {len(value)} is ' \\\n                                             f'not consistent with' \\\n                                             f' the length ' \\\n                                             f'of this :obj:`InstanceData` ' \\\n                                             f'{len(self)} '\n            super().__setattr__(name, value)\n\n    def __getitem__(self, item):\n        \"\"\"\n        Args:\n            item (str, obj:`slice`,\n                obj`torch.LongTensor`, obj:`torch.BoolTensor`):\n                get the corresponding values according to item.\n\n        Returns:\n            obj:`InstanceData`: Corresponding values.\n        \"\"\"\n        assert len(self), ' This is a empty instance'\n\n        assert isinstance(\n            item, (str, slice, int, torch.LongTensor, torch.BoolTensor))\n\n        if isinstance(item, str):\n            return getattr(self, item)\n\n        if type(item) == int:\n            if item >= len(self) or item < -len(self):\n                raise IndexError(f'Index {item} out of range!')\n            else:\n                # keep the dimension\n                item = slice(item, None, len(self))\n\n        new_data = self.new()\n        if isinstance(item, (torch.Tensor)):\n            assert item.dim() == 1, 'Only support to get the' \\\n                                 ' values along the first dimension.'\n            if isinstance(item, torch.BoolTensor):\n                assert len(item) == len(self), f'The shape of the' \\\n                                               f' input(BoolTensor)) ' \\\n                                               f'{len(item)} ' \\\n                                               f' does not match the shape ' \\\n                                               f'of the indexed tensor ' \\\n                                               f'in results_filed ' \\\n                                               f'{len(self)} at ' \\\n                                               f'first dimension. 
'\n\n            for k, v in self.items():\n                if isinstance(v, torch.Tensor):\n                    new_data[k] = v[item]\n                elif isinstance(v, np.ndarray):\n                    new_data[k] = v[item.cpu().numpy()]\n                elif isinstance(v, list):\n                    r_list = []\n                    # convert to indexes from boolTensor\n                    if isinstance(item, torch.BoolTensor):\n                        indexes = torch.nonzero(item).view(-1)\n                    else:\n                        indexes = item\n                    for index in indexes:\n                        r_list.append(v[index])\n                    new_data[k] = r_list\n        else:\n            # item is a slice\n            for k, v in self.items():\n                new_data[k] = v[item]\n        return new_data\n\n    @staticmethod\n    def cat(instances_list):\n        \"\"\"Concat the predictions of all :obj:`InstanceData` in the list.\n\n        Args:\n            instances_list (list[:obj:`InstanceData`]): A list\n                of :obj:`InstanceData`.\n\n        Returns:\n            obj:`InstanceData`\n        \"\"\"\n        assert all(\n            isinstance(results, InstanceData) for results in instances_list)\n        assert len(instances_list) > 0\n        if len(instances_list) == 1:\n            return instances_list[0]\n\n        new_data = instances_list[0].new()\n        for k in instances_list[0]._data_fields:\n            values = [results[k] for results in instances_list]\n            v0 = values[0]\n            if isinstance(v0, torch.Tensor):\n                values = torch.cat(values, dim=0)\n            elif isinstance(v0, np.ndarray):\n                values = np.concatenate(values, axis=0)\n            elif isinstance(v0, list):\n                values = list(itertools.chain(*values))\n            else:\n                raise ValueError(\n                    f'Can not concat the {k} which is a {type(v0)}')\n            new_data[k] = values\n        return new_data\n\n    def __len__(self):\n        if len(self._data_fields):\n            for v in self.values():\n                return len(v)\n        else:\n            raise AssertionError('This is an empty `InstanceData`.')\n"
  },
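A compact sketch of the per-instance guarantees described above: all data fields share one length, boolean indexing filters every field consistently, and `cat` concatenates instance-wise. Field names and values are arbitrary.

import torch

from mmdet.core import InstanceData

a = InstanceData(dict(img_shape=(800, 1196, 3)))
a.det_scores = torch.tensor([0.9, 0.1])
a.det_labels = torch.tensor([0, 1])

b = a.new(data=dict(det_scores=torch.tensor([0.5]),
                    det_labels=torch.tensor([2])))

merged = InstanceData.cat([a, b])
assert len(merged) == 3
# Boolean indexing keeps matching rows in every field: here the 0.9 and 0.5 rows.
kept = merged[merged.det_scores > 0.4]
assert len(kept) == 2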
  {
    "path": "mmdet/core/evaluation/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .class_names import (cityscapes_classes, coco_classes, dataset_aliases,\n                          get_classes, imagenet_det_classes,\n                          imagenet_vid_classes, objects365v1_classes,\n                          objects365v2_classes, oid_challenge_classes,\n                          oid_v6_classes, voc_classes)\nfrom .eval_hooks import DistEvalHook, EvalHook\nfrom .mean_ap import average_precision, eval_map, print_map_summary\nfrom .panoptic_utils import INSTANCE_OFFSET\nfrom .recall import (eval_recalls, plot_iou_recall, plot_num_recall,\n                     print_recall_summary)\n\n__all__ = [\n    'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes',\n    'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes',\n    'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map',\n    'print_map_summary', 'eval_recalls', 'print_recall_summary',\n    'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes',\n    'oid_challenge_classes', 'objects365v1_classes', 'objects365v2_classes',\n    'INSTANCE_OFFSET'\n]\n"
  },
  {
    "path": "mmdet/core/evaluation/bbox_overlaps.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\n\ndef bbox_overlaps(bboxes1,\n                  bboxes2,\n                  mode='iou',\n                  eps=1e-6,\n                  use_legacy_coordinate=False):\n    \"\"\"Calculate the ious between each bbox of bboxes1 and bboxes2.\n\n    Args:\n        bboxes1 (ndarray): Shape (n, 4)\n        bboxes2 (ndarray): Shape (k, 4)\n        mode (str): IOU (intersection over union) or IOF (intersection\n            over foreground)\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Note when function is used in `VOCDataset`, it should be\n            True to align with the official implementation\n            `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar`\n            Default: False.\n\n    Returns:\n        ious (ndarray): Shape (n, k)\n    \"\"\"\n\n    assert mode in ['iou', 'iof']\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n    bboxes1 = bboxes1.astype(np.float32)\n    bboxes2 = bboxes2.astype(np.float32)\n    rows = bboxes1.shape[0]\n    cols = bboxes2.shape[0]\n    ious = np.zeros((rows, cols), dtype=np.float32)\n    if rows * cols == 0:\n        return ious\n    exchange = False\n    if bboxes1.shape[0] > bboxes2.shape[0]:\n        bboxes1, bboxes2 = bboxes2, bboxes1\n        ious = np.zeros((cols, rows), dtype=np.float32)\n        exchange = True\n    area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * (\n        bboxes1[:, 3] - bboxes1[:, 1] + extra_length)\n    area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * (\n        bboxes2[:, 3] - bboxes2[:, 1] + extra_length)\n    for i in range(bboxes1.shape[0]):\n        x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0])\n        y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1])\n        x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2])\n        y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3])\n        overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum(\n            y_end - y_start + extra_length, 0)\n        if mode == 'iou':\n            union = area1[i] + area2 - overlap\n        else:\n            union = area1[i] if not exchange else area2\n        union = np.maximum(union, eps)\n        ious[i, :] = overlap / union\n    if exchange:\n        ious = ious.T\n    return ious\n"
  },
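A tiny numeric check of the IoU helper above; the box coordinates are arbitrary and use the default (non-legacy) coordinate convention.

import numpy as np

from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps

gts = np.array([[0., 0., 10., 10.]])
dets = np.array([[0., 0., 10., 10.], [5., 5., 15., 15.]])

ious = bbox_overlaps(dets, gts)  # shape (2, 1): one column per GT box
assert np.isclose(ious[0, 0], 1.0)
# Overlap 5x5 = 25, union 100 + 100 - 25 = 175, so IoU = 25/175 = 1/7.
assert np.isclose(ious[1, 0], 25. / 175.)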
  {
    "path": "mmdet/core/evaluation/class_names.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\n\n\ndef wider_face_classes():\n    return ['face']\n\n\ndef voc_classes():\n    return [\n        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',\n        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',\n        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'\n    ]\n\n\ndef imagenet_det_classes():\n    return [\n        'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo',\n        'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam',\n        'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap',\n        'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder',\n        'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito',\n        'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle',\n        'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker',\n        'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew',\n        'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper',\n        'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly',\n        'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig',\n        'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog',\n        'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart',\n        'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger',\n        'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim',\n        'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse',\n        'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle',\n        'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard',\n        'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can',\n        'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace',\n        'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume',\n        'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza',\n        'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine',\n        'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse',\n        'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator',\n        'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler',\n        'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver',\n        'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile',\n        'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula',\n        'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer',\n        'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine',\n        'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie',\n        'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet',\n        'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin',\n        'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft',\n        'whale', 'wine_bottle', 'zebra'\n    ]\n\n\ndef imagenet_vid_classes():\n    return [\n        'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car',\n        'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda',\n        'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit',\n        'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle',\n        'watercraft', 'whale', 
'zebra'\n    ]\n\n\ndef coco_classes():\n    return [\n        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n        'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign',\n        'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n        'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard',\n        'surfboard', 'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork',\n        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n        'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair',\n        'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv',\n        'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',\n        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n        'scissors', 'teddy_bear', 'hair_drier', 'toothbrush'\n    ]\n\n\ndef cityscapes_classes():\n    return [\n        'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n        'bicycle'\n    ]\n\n\ndef oid_challenge_classes():\n    return [\n        'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle',\n        'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl',\n        'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert',\n        'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee',\n        'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink',\n        'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table',\n        'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light',\n        'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum',\n        'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat',\n        'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt',\n        'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear',\n        'Vehicle registration plate', 'Microphone', 'Musical keyboard',\n        'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable',\n        'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries',\n        'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane',\n        'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail',\n        'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle',\n        'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat',\n        'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame',\n        'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet',\n        'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag',\n        'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree',\n        'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine',\n        'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance',\n        'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard',\n        'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf',\n        'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch',\n        'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster',\n        'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal',\n        'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer',\n        'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 
'Deer',\n        'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace',\n        'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry',\n        'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot',\n        'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite',\n        'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper',\n        'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft',\n        'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter',\n        'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra',\n        'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard',\n        'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building',\n        'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll',\n        'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon',\n        'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock',\n        'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance',\n        'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair',\n        'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat',\n        'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen',\n        'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust',\n        'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot',\n        'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken',\n        'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod',\n        'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet',\n        'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture',\n        'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat',\n        'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep',\n        'Tablet computer', 'Pillow', 'Kitchen & dining room table',\n        'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree',\n        'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread',\n        'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope',\n        'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber',\n        'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies',\n        'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch',\n        'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags',\n        'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock',\n        'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza',\n        'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store',\n        'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry',\n        'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase',\n        'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft',\n        'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer',\n        'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon',\n        'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger',\n        'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball',\n        'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin',\n        'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle',\n        'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot',\n        'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle',\n        
'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman',\n        'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper',\n        'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone',\n        'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear',\n        'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail',\n        'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn',\n        'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango',\n        'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell',\n        'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase',\n        'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup',\n        'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula',\n        'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon'\n    ]\n\n\ndef oid_v6_classes():\n    return [\n        'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football',\n        'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy',\n        'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye',\n        'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard',\n        'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber',\n        'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick',\n        'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle',\n        'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot',\n        'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy',\n        'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt',\n        'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear',\n        'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot',\n        'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee',\n        'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw',\n        'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern',\n        'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace',\n        'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer',\n        'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock',\n        'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft',\n        'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile',\n        'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel',\n        'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola',\n        'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building',\n        'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor',\n        'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment',\n        'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini',\n        'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur',\n        'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula',\n        'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser',\n        'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero',\n        'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener',\n        'Goggles', 'Human body', 'Roller skates', 'Coffee cup',\n        'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign',\n        'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker',\n        'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 
'Personal care', 'Food',\n        'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove',\n        'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax',\n        'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart',\n        'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind',\n        'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light',\n        'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear',\n        'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle',\n        'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat',\n        'Baseball bat', 'Baseball glove', 'Mixing bowl',\n        'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House',\n        'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed',\n        'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer',\n        'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster',\n        'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw',\n        'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate',\n        'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove',\n        'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)',\n        'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet',\n        'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife',\n        'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse',\n        'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard',\n        'Billiard table', 'Mammal', 'Mouse', 'Motorcycle',\n        'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow',\n        'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk',\n        'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom',\n        'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device',\n        'Table tennis racket', 'Pencil case', 'Musical keyboard', 'Scoreboard',\n        'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball',\n        'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl',\n        'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta',\n        'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer',\n        'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile',\n        'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda',\n        'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood',\n        'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi',\n        'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine',\n        'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table',\n        'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco',\n        'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree',\n        'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray',\n        'Trousers', 'Bowling equipment', 'Football helmet', 'Truck',\n        'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag',\n        'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale',\n        'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion',\n        'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck',\n        'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper',\n        'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog',\n       
 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer',\n        'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark',\n        'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser',\n        'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger',\n        'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus',\n        'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull',\n        'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench',\n        'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange',\n        'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet',\n        'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut',\n        'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera',\n        'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable',\n        'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish',\n        'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple',\n        'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower',\n        'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug',\n        'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow',\n        'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone',\n        'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray',\n        'Kitchen & dining room table', 'Dog bed', 'Cake stand',\n        'Cat furniture', 'Bathroom accessory', 'Facial tissue holder',\n        'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler',\n        'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry',\n        'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily',\n        'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant',\n        'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon',\n        'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich',\n        'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod',\n        'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume',\n        'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair',\n        'Rugby ball', 'Armadillo', 'Maracas', 'Helmet'\n    ]\n\n\ndef objects365v1_classes():\n    return [\n        'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',\n        'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',\n        'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes',\n        'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv',\n        'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl',\n        'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can',\n        'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket',\n        'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv',\n        'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone',\n        'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle',\n        'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish',\n        'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle',\n        'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle',\n        'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus',\n        'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone',\n        'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon',\n        
'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal',\n        'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane',\n        'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote',\n        'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato',\n        'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone',\n        'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine',\n        'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove',\n        'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat',\n        'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun',\n        'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck',\n        'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator',\n        'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',\n        'cutting/chopping board', 'tennis racket', 'candy',\n        'skating and skiing shoes', 'scissors', 'folder', 'baseball',\n        'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',\n        'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',\n        'american football', 'basketball', 'potato', 'paint brush', 'printer',\n        'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',\n        'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',\n        'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',\n        'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',\n        'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',\n        'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',\n        'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale',\n        'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',\n        'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',\n        'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',\n        'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',\n        'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',\n        'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',\n        'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',\n        'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',\n        'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',\n        'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',\n        'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope',\n        'shrimp', 'rickshaw', 'trombone', 'pomegranate', 'coconut',\n        'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly',\n        'egg tart', 'cheese', 'pig', 'pomelo', 'race car', 'rice cooker',\n        'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion',\n        'chips', 'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill',\n        'spring rolls', 'tortoise/turtle', 'parrot', 'flute', 'measuring cup',\n        'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish',\n        'noodles', 'yak', 'mop', 'crab', 'microscope', 'barbell', 'bread/bun',\n        'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal',\n        'mangosteen', 'comb', 'eraser', 'pitaya', 'scallop', 'pencil case',\n        'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle', 'monkey',\n        'durian', 'game board', 'rabbit', 'french horn', 'ambulance',\n        'asparagus', 'hoverboard', 
'pasta', 'target', 'hotair balloon',\n        'chainsaw', 'lobster', 'iron', 'flashlight'\n    ]\n\n\ndef objects365v2_classes():\n    return [\n        'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',\n        'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',\n        'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',\n        'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',\n        'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',\n        'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',\n        'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',\n        'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',\n        'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',\n        'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar',\n        'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',\n        'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy',\n        'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent',\n        'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner',\n        'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork',\n        'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot',\n        'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',\n        'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',\n        'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle',\n        'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane',\n        'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage',\n        'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone',\n        'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane',\n        'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat',\n        'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza',\n        'Elephant', 'Skateboard', 'Surfboard', 'Gun',\n        'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',\n        'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',\n        'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',\n        'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',\n        'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',\n        'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',\n        'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin',\n        'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards',\n        'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase',\n        'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck',\n        'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket',\n        'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis',\n        'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion',\n        'Green beans', 'Projector', 'Frisbee',\n        'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',\n        'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',\n        'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',\n        'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',\n        'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',\n        'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 
'Key', 'Hurdle',\n        'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',\n        'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',\n        'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',\n        'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',\n        'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',\n        'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',\n        'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',\n        'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',\n        'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',\n        'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',\n        'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba',\n        'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', 'Buttefly',\n        'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill',\n        'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill', 'Lighter',\n        'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target',\n        'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak',\n        'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',\n        'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',\n        'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',\n        'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',\n        'Table Tennis '\n    ]\n\n\ndataset_aliases = {\n    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],\n    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],\n    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],\n    'coco': ['coco', 'mscoco', 'ms_coco'],\n    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],\n    'cityscapes': ['cityscapes'],\n    'oid_challenge': ['oid_challenge', 'openimages_challenge'],\n    'oid_v6': ['oid_v6', 'openimages_v6'],\n    'objects365v1': ['objects365v1', 'obj365v1'],\n    'objects365v2': ['objects365v2', 'obj365v2']\n}\n\n\ndef get_classes(dataset):\n    \"\"\"Get class names of a dataset.\"\"\"\n    alias2name = {}\n    for name, aliases in dataset_aliases.items():\n        for alias in aliases:\n            alias2name[alias] = name\n\n    if mmcv.is_str(dataset):\n        if dataset in alias2name:\n            labels = eval(alias2name[dataset] + '_classes()')\n        else:\n            raise ValueError(f'Unrecognized dataset: {dataset}')\n    else:\n        raise TypeError(f'dataset must a str, but got {type(dataset)}')\n    return labels\n"
  },
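A minimal usage sketch for the `class_names.py` module above, assuming `mmdet` is installed so that `get_classes` is importable from `mmdet.core.evaluation`; any alias registered in `dataset_aliases` resolves to the matching `*_classes()` list, and unregistered names raise `ValueError`:

```python
# Hypothetical usage sketch; assumes mmdet is importable.
from mmdet.core.evaluation import get_classes

# Any alias in dataset_aliases resolves to the matching *_classes() list.
coco_names = get_classes('mscoco')   # alias of 'coco'
print(len(coco_names))               # 80
print(coco_names[:3])                # ['person', 'bicycle', 'car']

voc_names = get_classes('voc07')     # alias of 'voc'
print(len(voc_names))                # 20

try:
    get_classes('kitti')             # not registered in dataset_aliases
except ValueError as err:
    print(err)                       # "Unrecognized dataset: kitti"
```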
  {
    "path": "mmdet/core/evaluation/eval_hooks.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport bisect\nimport os.path as osp\n\nimport mmcv\nimport torch.distributed as dist\nfrom mmcv.runner import DistEvalHook as BaseDistEvalHook\nfrom mmcv.runner import EvalHook as BaseEvalHook\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\n\ndef _calc_dynamic_intervals(start_interval, dynamic_interval_list):\n    assert mmcv.is_list_of(dynamic_interval_list, tuple)\n\n    dynamic_milestones = [0]\n    dynamic_milestones.extend(\n        [dynamic_interval[0] for dynamic_interval in dynamic_interval_list])\n    dynamic_intervals = [start_interval]\n    dynamic_intervals.extend(\n        [dynamic_interval[1] for dynamic_interval in dynamic_interval_list])\n    return dynamic_milestones, dynamic_intervals\n\n\nclass EvalHook(BaseEvalHook):\n\n    def __init__(self, *args, dynamic_intervals=None, **kwargs):\n        super(EvalHook, self).__init__(*args, **kwargs)\n        self.latest_results = None\n\n        self.use_dynamic_intervals = dynamic_intervals is not None\n        if self.use_dynamic_intervals:\n            self.dynamic_milestones, self.dynamic_intervals = \\\n                _calc_dynamic_intervals(self.interval, dynamic_intervals)\n\n    def _decide_interval(self, runner):\n        if self.use_dynamic_intervals:\n            progress = runner.epoch if self.by_epoch else runner.iter\n            step = bisect.bisect(self.dynamic_milestones, (progress + 1))\n            # Dynamically modify the evaluation interval\n            self.interval = self.dynamic_intervals[step - 1]\n\n    def before_train_epoch(self, runner):\n        \"\"\"Evaluate the model only at the start of training by epoch.\"\"\"\n        self._decide_interval(runner)\n        super().before_train_epoch(runner)\n\n    def before_train_iter(self, runner):\n        self._decide_interval(runner)\n        super().before_train_iter(runner)\n\n    def _do_evaluate(self, runner):\n        \"\"\"perform evaluation and save ckpt.\"\"\"\n        if not self._should_evaluate(runner):\n            return\n\n        from mmdet.apis import single_gpu_test\n\n        # Changed results to self.results so that MMDetWandbHook can access\n        # the evaluation results and log them to wandb.\n        results = single_gpu_test(runner.model, self.dataloader, show=False)\n        self.latest_results = results\n        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)\n        key_score = self.evaluate(runner, results)\n        # the key_score may be `None` so it needs to skip the action to save\n        # the best checkpoint\n        if self.save_best and key_score:\n            self._save_ckpt(runner, key_score)\n\n\n# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,\n# in order to avoid strong version dependency, we did not directly\n# inherit EvalHook but BaseDistEvalHook.\nclass DistEvalHook(BaseDistEvalHook):\n\n    def __init__(self, *args, dynamic_intervals=None, **kwargs):\n        super(DistEvalHook, self).__init__(*args, **kwargs)\n        self.latest_results = None\n\n        self.use_dynamic_intervals = dynamic_intervals is not None\n        if self.use_dynamic_intervals:\n            self.dynamic_milestones, self.dynamic_intervals = \\\n                _calc_dynamic_intervals(self.interval, dynamic_intervals)\n\n    def _decide_interval(self, runner):\n        if self.use_dynamic_intervals:\n            progress = runner.epoch if self.by_epoch else runner.iter\n            step = 
bisect.bisect(self.dynamic_milestones, (progress + 1))\n            # Dynamically modify the evaluation interval\n            self.interval = self.dynamic_intervals[step - 1]\n\n    def before_train_epoch(self, runner):\n        \"\"\"Evaluate the model only at the start of training by epoch.\"\"\"\n        self._decide_interval(runner)\n        super().before_train_epoch(runner)\n\n    def before_train_iter(self, runner):\n        self._decide_interval(runner)\n        super().before_train_iter(runner)\n\n    def _do_evaluate(self, runner):\n        \"\"\"perform evaluation and save ckpt.\"\"\"\n        # Synchronization of BatchNorm's buffer (running_mean\n        # and running_var) is not supported in the DDP of pytorch,\n        # which may cause the inconsistent performance of models in\n        # different ranks, so we broadcast BatchNorm's buffers\n        # of rank 0 to other ranks to avoid this.\n        if self.broadcast_bn_buffer:\n            model = runner.model\n            for name, module in model.named_modules():\n                if isinstance(module,\n                              _BatchNorm) and module.track_running_stats:\n                    dist.broadcast(module.running_var, 0)\n                    dist.broadcast(module.running_mean, 0)\n\n        if not self._should_evaluate(runner):\n            return\n\n        tmpdir = self.tmpdir\n        if tmpdir is None:\n            tmpdir = osp.join(runner.work_dir, '.eval_hook')\n\n        from mmdet.apis import multi_gpu_test\n\n        # Changed results to self.results so that MMDetWandbHook can access\n        # the evaluation results and log them to wandb.\n        results = multi_gpu_test(\n            runner.model,\n            self.dataloader,\n            tmpdir=tmpdir,\n            gpu_collect=self.gpu_collect)\n        self.latest_results = results\n        if runner.rank == 0:\n            print('\\n')\n            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)\n            key_score = self.evaluate(runner, results)\n\n            # the key_score may be `None` so it needs to skip\n            # the action to save the best checkpoint\n            if self.save_best and key_score:\n                self._save_ckpt(runner, key_score)\n"
  },
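The dynamic-interval handling in `eval_hooks.py` above is easiest to see in isolation. The sketch below reproduces the same bookkeeping with made-up numbers: `_calc_dynamic_intervals` prepends the base interval at milestone 0, and `bisect.bisect` picks the interval belonging to the last milestone that training progress has already passed.

```python
# Self-contained sketch of EvalHook's dynamic interval selection (toy numbers).
import bisect

def calc_dynamic_intervals(start_interval, dynamic_interval_list):
    # Same bookkeeping as _calc_dynamic_intervals above: milestone 0 carries
    # the base interval, later milestones carry their own intervals.
    milestones = [0] + [m for m, _ in dynamic_interval_list]
    intervals = [start_interval] + [i for _, i in dynamic_interval_list]
    return milestones, intervals

milestones, intervals = calc_dynamic_intervals(4, [(8, 2), (11, 1)])

for epoch in range(12):
    # Mirrors _decide_interval: locate the last milestone <= progress + 1.
    step = bisect.bisect(milestones, epoch + 1)
    print(f'epoch {epoch:2d} -> evaluate every {intervals[step - 1]} epoch(s)')
```

With these toy values the hook would evaluate every 4 epochs at first, switch to every 2 epochs once `epoch + 1` reaches 8, and evaluate every epoch from milestone 11 onward.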
  {
    "path": "mmdet/core/evaluation/mean_ap.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\nfrom .class_names import get_classes\n\n\ndef average_precision(recalls, precisions, mode='area'):\n    \"\"\"Calculate average precision (for single or multiple scales).\n\n    Args:\n        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )\n        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )\n        mode (str): 'area' or '11points', 'area' means calculating the area\n            under precision-recall curve, '11points' means calculating\n            the average precision of recalls at [0, 0.1, ..., 1]\n\n    Returns:\n        float or ndarray: calculated average precision\n    \"\"\"\n    no_scale = False\n    if recalls.ndim == 1:\n        no_scale = True\n        recalls = recalls[np.newaxis, :]\n        precisions = precisions[np.newaxis, :]\n    assert recalls.shape == precisions.shape and recalls.ndim == 2\n    num_scales = recalls.shape[0]\n    ap = np.zeros(num_scales, dtype=np.float32)\n    if mode == 'area':\n        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)\n        ones = np.ones((num_scales, 1), dtype=recalls.dtype)\n        mrec = np.hstack((zeros, recalls, ones))\n        mpre = np.hstack((zeros, precisions, zeros))\n        for i in range(mpre.shape[1] - 1, 0, -1):\n            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])\n        for i in range(num_scales):\n            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]\n            ap[i] = np.sum(\n                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])\n    elif mode == '11points':\n        for i in range(num_scales):\n            for thr in np.arange(0, 1 + 1e-3, 0.1):\n                precs = precisions[i, recalls[i, :] >= thr]\n                prec = precs.max() if precs.size > 0 else 0\n                ap[i] += prec\n        ap /= 11\n    else:\n        raise ValueError(\n            'Unrecognized mode, only \"area\" and \"11points\" are supported')\n    if no_scale:\n        ap = ap[0]\n    return ap\n\n\ndef tpfp_imagenet(det_bboxes,\n                  gt_bboxes,\n                  gt_bboxes_ignore=None,\n                  default_iou_thr=0.5,\n                  area_ranges=None,\n                  use_legacy_coordinate=False,\n                  **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Default: None\n        default_iou_thr (float): IoU threshold to be considered as matched for\n            medium and large bboxes (small ones have special rules).\n            Default: 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,\n            in the format [(min1, max1), (min2, max2), ...]. Default: None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Default: False.\n\n    Returns:\n        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. 
The shape of\n        each array is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp\n    # of a certain scale.\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp\n    ious = bbox_overlaps(\n        det_bboxes, gt_bboxes - 1, use_legacy_coordinate=use_legacy_coordinate)\n    gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length\n    gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length\n    iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),\n                          default_iou_thr)\n    # sort all detections by scores in descending order\n    sort_inds = np.argsort(-det_bboxes[:, -1])\n    for k, (min_area, max_area) in enumerate(area_ranges):\n        gt_covered = np.zeros(num_gts, dtype=bool)\n        # if no area range is specified, gt_area_ignore is all False\n        if min_area is None:\n            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n        else:\n            gt_areas = gt_w * gt_h\n            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n        for i in sort_inds:\n            max_iou = -1\n            matched_gt = -1\n            # find best overlapped available gt\n            for j in range(num_gts):\n                # different from PASCAL VOC: allow finding other gts if the\n                # best overlapped ones are already matched by other det bboxes\n                if gt_covered[j]:\n                    continue\n                elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:\n                    max_iou = ious[i, j]\n                    matched_gt = j\n            # there are 4 cases for a det bbox:\n            # 1. it matches a gt, tp = 1, fp = 0\n            # 2. it matches an ignored gt, tp = 0, fp = 0\n            # 3. it matches no gt and within area range, tp = 0, fp = 1\n            # 4. 
it matches no gt but is beyond area range, tp = 0, fp = 0\n            if matched_gt >= 0:\n                gt_covered[matched_gt] = 1\n                if not (gt_ignore_inds[matched_gt]\n                        or gt_area_ignore[matched_gt]):\n                    tp[k, i] = 1\n            elif min_area is None:\n                fp[k, i] = 1\n            else:\n                bbox = det_bboxes[i, :4]\n                area = (bbox[2] - bbox[0] + extra_length) * (\n                    bbox[3] - bbox[1] + extra_length)\n                if area >= min_area and area < max_area:\n                    fp[k, i] = 1\n    return tp, fp\n\n\ndef tpfp_default(det_bboxes,\n                 gt_bboxes,\n                 gt_bboxes_ignore=None,\n                 iou_thr=0.5,\n                 area_ranges=None,\n                 use_legacy_coordinate=False,\n                 **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Default: None\n        iou_thr (float): IoU threshold to be considered as matched.\n            Default: 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be\n            evaluated, in the format [(min1, max1), (min2, max2), ...].\n            Default: None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Default: False.\n\n    Returns:\n        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of\n        each array is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n    # a certain scale\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n    # if there is no gt bboxes in this image, then all det bboxes\n    # within area range are false positives\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] 
= 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp\n\n    ious = bbox_overlaps(\n        det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)\n    # for each det, the max iou with all gts\n    ious_max = ious.max(axis=1)\n    # for each det, which gt overlaps most with it\n    ious_argmax = ious.argmax(axis=1)\n    # sort all dets in descending order by scores\n    sort_inds = np.argsort(-det_bboxes[:, -1])\n    for k, (min_area, max_area) in enumerate(area_ranges):\n        gt_covered = np.zeros(num_gts, dtype=bool)\n        # if no area range is specified, gt_area_ignore is all False\n        if min_area is None:\n            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n        else:\n            gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (\n                gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)\n            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n        for i in sort_inds:\n            if ious_max[i] >= iou_thr:\n                matched_gt = ious_argmax[i]\n                if not (gt_ignore_inds[matched_gt]\n                        or gt_area_ignore[matched_gt]):\n                    if not gt_covered[matched_gt]:\n                        gt_covered[matched_gt] = True\n                        tp[k, i] = 1\n                    else:\n                        fp[k, i] = 1\n                # otherwise ignore this detected bbox, tp = 0, fp = 0\n            elif min_area is None:\n                fp[k, i] = 1\n            else:\n                bbox = det_bboxes[i, :4]\n                area = (bbox[2] - bbox[0] + extra_length) * (\n                    bbox[3] - bbox[1] + extra_length)\n                if area >= min_area and area < max_area:\n                    fp[k, i] = 1\n    return tp, fp\n\n\ndef tpfp_openimages(det_bboxes,\n                    gt_bboxes,\n                    gt_bboxes_ignore=None,\n                    iou_thr=0.5,\n                    area_ranges=None,\n                    use_legacy_coordinate=False,\n                    gt_bboxes_group_of=None,\n                    use_group_of=True,\n                    ioa_thr=0.5,\n                    **kwargs):\n    \"\"\"Check if detected bboxes are true positive or false positive.\n\n    Args:\n        det_bbox (ndarray): Detected bboxes of this image, of shape (m, 5).\n        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).\n        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,\n            of shape (k, 4). Default: None\n        iou_thr (float): IoU threshold to be considered as matched.\n            Default: 0.5.\n        area_ranges (list[tuple] | None): Range of bbox areas to be\n            evaluated, in the format [(min1, max1), (min2, max2), ...].\n            Default: None.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Default: False.\n        gt_bboxes_group_of (ndarray): GT group_of of this image, of shape\n            (k, 1). 
Default: None\n        use_group_of (bool): Whether to use group of when calculate TP and FP,\n            which only used in OpenImages evaluation. Default: True.\n        ioa_thr (float | None): IoA threshold to be considered as matched,\n            which only used in OpenImages evaluation. Default: 0.5.\n\n    Returns:\n        tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes), where\n        (tp, fp) whose elements are 0 and 1. The shape of each array is\n        (num_scales, m). (det_bboxes) whose will filter those are not\n        matched by group of gts when processing Open Images evaluation.\n        The shape is (num_scales, m).\n    \"\"\"\n\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    # an indicator of ignored gts\n    gt_ignore_inds = np.concatenate(\n        (np.zeros(gt_bboxes.shape[0],\n                  dtype=bool), np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))\n    # stack gt_bboxes and gt_bboxes_ignore for convenience\n    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))\n\n    num_dets = det_bboxes.shape[0]\n    num_gts = gt_bboxes.shape[0]\n    if area_ranges is None:\n        area_ranges = [(None, None)]\n    num_scales = len(area_ranges)\n    # tp and fp are of shape (num_scales, num_gts), each row is tp or fp of\n    # a certain scale\n    tp = np.zeros((num_scales, num_dets), dtype=np.float32)\n    fp = np.zeros((num_scales, num_dets), dtype=np.float32)\n\n    # if there is no gt bboxes in this image, then all det bboxes\n    # within area range are false positives\n    if gt_bboxes.shape[0] == 0:\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n        return tp, fp, det_bboxes\n\n    if gt_bboxes_group_of is not None and use_group_of:\n        # if handle group-of boxes, divided gt boxes into two parts:\n        # non-group-of and group-of.Then calculate ious and ioas through\n        # non-group-of group-of gts respectively. 
This only used in\n        # OpenImages evaluation.\n        assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0]\n        non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of]\n        group_gt_bboxes = gt_bboxes[gt_bboxes_group_of]\n        num_gts_group = group_gt_bboxes.shape[0]\n        ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes)\n        ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof')\n    else:\n        # if not consider group-of boxes, only calculate ious through gt boxes\n        ious = bbox_overlaps(\n            det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)\n        ioas = None\n\n    if ious.shape[1] > 0:\n        # for each det, the max iou with all gts\n        ious_max = ious.max(axis=1)\n        # for each det, which gt overlaps most with it\n        ious_argmax = ious.argmax(axis=1)\n        # sort all dets in descending order by scores\n        sort_inds = np.argsort(-det_bboxes[:, -1])\n        for k, (min_area, max_area) in enumerate(area_ranges):\n            gt_covered = np.zeros(num_gts, dtype=bool)\n            # if no area range is specified, gt_area_ignore is all False\n            if min_area is None:\n                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n            else:\n                gt_areas = (\n                    gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (\n                        gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)\n                gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n            for i in sort_inds:\n                if ious_max[i] >= iou_thr:\n                    matched_gt = ious_argmax[i]\n                    if not (gt_ignore_inds[matched_gt]\n                            or gt_area_ignore[matched_gt]):\n                        if not gt_covered[matched_gt]:\n                            gt_covered[matched_gt] = True\n                            tp[k, i] = 1\n                        else:\n                            fp[k, i] = 1\n                    # otherwise ignore this detected bbox, tp = 0, fp = 0\n                elif min_area is None:\n                    fp[k, i] = 1\n                else:\n                    bbox = det_bboxes[i, :4]\n                    area = (bbox[2] - bbox[0] + extra_length) * (\n                        bbox[3] - bbox[1] + extra_length)\n                    if area >= min_area and area < max_area:\n                        fp[k, i] = 1\n    else:\n        # if there is no no-group-of gt bboxes in this image,\n        # then all det bboxes within area range are false positives.\n        # Only used in OpenImages evaluation.\n        if area_ranges == [(None, None)]:\n            fp[...] = 1\n        else:\n            det_areas = (\n                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (\n                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)\n            for i, (min_area, max_area) in enumerate(area_ranges):\n                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1\n\n    if ioas is None or ioas.shape[1] <= 0:\n        return tp, fp, det_bboxes\n    else:\n        # The evaluation of group-of TP and FP are done in two stages:\n        # 1. All detections are first matched to non group-of boxes; true\n        #    positives are determined.\n        # 2. 
Detections that are determined as false positives are matched\n        #    against group-of boxes and calculated group-of TP and FP.\n        # Only used in OpenImages evaluation.\n        det_bboxes_group = np.zeros(\n            (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float)\n        match_group_of = np.zeros((num_scales, num_dets), dtype=bool)\n        tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32)\n        ioas_max = ioas.max(axis=1)\n        # for each det, which gt overlaps most with it\n        ioas_argmax = ioas.argmax(axis=1)\n        # sort all dets in descending order by scores\n        sort_inds = np.argsort(-det_bboxes[:, -1])\n        for k, (min_area, max_area) in enumerate(area_ranges):\n            box_is_covered = tp[k]\n            # if no area range is specified, gt_area_ignore is all False\n            if min_area is None:\n                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)\n            else:\n                gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n                    gt_bboxes[:, 3] - gt_bboxes[:, 1])\n                gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)\n            for i in sort_inds:\n                matched_gt = ioas_argmax[i]\n                if not box_is_covered[i]:\n                    if ioas_max[i] >= ioa_thr:\n                        if not (gt_ignore_inds[matched_gt]\n                                or gt_area_ignore[matched_gt]):\n                            if not tp_group[k, matched_gt]:\n                                tp_group[k, matched_gt] = 1\n                                match_group_of[k, i] = True\n                            else:\n                                match_group_of[k, i] = True\n\n                            if det_bboxes_group[k, matched_gt, -1] < \\\n                                    det_bboxes[i, -1]:\n                                det_bboxes_group[k, matched_gt] = \\\n                                    det_bboxes[i]\n\n        fp_group = (tp_group <= 0).astype(float)\n        tps = []\n        fps = []\n        # concatenate tp, fp, and det-boxes which not matched group of\n        # gt boxes and tp_group, fp_group, and det_bboxes_group which\n        # matched group of boxes respectively.\n        for i in range(num_scales):\n            tps.append(\n                np.concatenate((tp[i][~match_group_of[i]], tp_group[i])))\n            fps.append(\n                np.concatenate((fp[i][~match_group_of[i]], fp_group[i])))\n            det_bboxes = np.concatenate(\n                (det_bboxes[~match_group_of[i]], det_bboxes_group[i]))\n\n        tp = np.vstack(tps)\n        fp = np.vstack(fps)\n        return tp, fp, det_bboxes\n\n\ndef get_cls_results(det_results, annotations, class_id):\n    \"\"\"Get det results and gt information of a certain class.\n\n    Args:\n        det_results (list[list]): Same as `eval_map()`.\n        annotations (list[dict]): Same as `eval_map()`.\n        class_id (int): ID of a specific class.\n\n    Returns:\n        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes\n    \"\"\"\n    cls_dets = [img_res[class_id] for img_res in det_results]\n    cls_gts = []\n    cls_gts_ignore = []\n    for ann in annotations:\n        gt_inds = ann['labels'] == class_id\n        cls_gts.append(ann['bboxes'][gt_inds, :])\n\n        if ann.get('labels_ignore', None) is not None:\n            ignore_inds = ann['labels_ignore'] == class_id\n            
cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])\n        else:\n            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))\n\n    return cls_dets, cls_gts, cls_gts_ignore\n\n\ndef get_cls_group_ofs(annotations, class_id):\n    \"\"\"Get `gt_group_of` of a certain class, which is used in Open Images.\n\n    Args:\n        annotations (list[dict]): Same as `eval_map()`.\n        class_id (int): ID of a specific class.\n\n    Returns:\n        list[np.ndarray]: `gt_group_of` of a certain class.\n    \"\"\"\n    gt_group_ofs = []\n    for ann in annotations:\n        gt_inds = ann['labels'] == class_id\n        if ann.get('gt_is_group_ofs', None) is not None:\n            gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds])\n        else:\n            gt_group_ofs.append(np.empty((0, 1), dtype=bool))\n\n    return gt_group_ofs\n\n\ndef eval_map(det_results,\n             annotations,\n             scale_ranges=None,\n             iou_thr=0.5,\n             ioa_thr=None,\n             dataset=None,\n             logger=None,\n             tpfp_fn=None,\n             nproc=4,\n             use_legacy_coordinate=False,\n             use_group_of=False):\n    \"\"\"Evaluate mAP of a dataset.\n\n    Args:\n        det_results (list[list]): [[cls1_det, cls2_det, ...], ...].\n            The outer list indicates images, and the inner list indicates\n            per-class detected bboxes.\n        annotations (list[dict]): Ground truth annotations where each item of\n            the list indicates an image. Keys of annotations are:\n\n            - `bboxes`: numpy array of shape (n, 4)\n            - `labels`: numpy array of shape (n, )\n            - `bboxes_ignore` (optional): numpy array of shape (k, 4)\n            - `labels_ignore` (optional): numpy array of shape (k, )\n        scale_ranges (list[tuple] | None): Range of scales to be evaluated,\n            in the format [(min1, max1), (min2, max2), ...]. A range of\n            (32, 64) means the area range between (32**2, 64**2).\n            Default: None.\n        iou_thr (float): IoU threshold to be considered as matched.\n            Default: 0.5.\n        ioa_thr (float | None): IoA threshold to be considered as matched,\n            which only used in OpenImages evaluation. Default: None.\n        dataset (list[str] | str | None): Dataset name or dataset classes,\n            there are minor differences in metrics for different datasets, e.g.\n            \"voc07\", \"imagenet_det\", etc. Default: None.\n        logger (logging.Logger | str | None): The way to print the mAP\n            summary. See `mmcv.utils.print_log()` for details. Default: None.\n        tpfp_fn (callable | None): The function used to determine true/\n            false positives. If None, :func:`tpfp_default` is used as default\n            unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this\n            case). If it is given as a function, then this function is used\n            to evaluate tp & fp. Default None.\n        nproc (int): Processes used for computing TP and FP.\n            Default: 4.\n        use_legacy_coordinate (bool): Whether to use coordinate system in\n            mmdet v1.x. which means width, height should be\n            calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively.\n            Default: False.\n        use_group_of (bool): Whether to use group of when calculate TP and FP,\n            which only used in OpenImages evaluation. 
Default: False.\n\n    Returns:\n        tuple: (mAP, [dict, dict, ...])\n    \"\"\"\n    assert len(det_results) == len(annotations)\n    if not use_legacy_coordinate:\n        extra_length = 0.\n    else:\n        extra_length = 1.\n\n    num_imgs = len(det_results)\n    num_scales = len(scale_ranges) if scale_ranges is not None else 1\n    num_classes = len(det_results[0])  # positive class num\n    area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges]\n                   if scale_ranges is not None else None)\n\n    # There is no need to use multi processes to process\n    # when num_imgs = 1 .\n    if num_imgs > 1:\n        assert nproc > 0, 'nproc must be at least one.'\n        nproc = min(nproc, num_imgs)\n        pool = Pool(nproc)\n\n    eval_results = []\n    for i in range(num_classes):\n        # get gt and det bboxes of this class\n        cls_dets, cls_gts, cls_gts_ignore = get_cls_results(\n            det_results, annotations, i)\n        # choose proper function according to datasets to compute tp and fp\n        if tpfp_fn is None:\n            if dataset in ['det', 'vid']:\n                tpfp_fn = tpfp_imagenet\n            elif dataset in ['oid_challenge', 'oid_v6'] \\\n                    or use_group_of is True:\n                tpfp_fn = tpfp_openimages\n            else:\n                tpfp_fn = tpfp_default\n        if not callable(tpfp_fn):\n            raise ValueError(\n                f'tpfp_fn has to be a function or None, but got {tpfp_fn}')\n\n        if num_imgs > 1:\n            # compute tp and fp for each image with multiple processes\n            args = []\n            if use_group_of:\n                # used in Open Images Dataset evaluation\n                gt_group_ofs = get_cls_group_ofs(annotations, i)\n                args.append(gt_group_ofs)\n                args.append([use_group_of for _ in range(num_imgs)])\n            if ioa_thr is not None:\n                args.append([ioa_thr for _ in range(num_imgs)])\n\n            tpfp = pool.starmap(\n                tpfp_fn,\n                zip(cls_dets, cls_gts, cls_gts_ignore,\n                    [iou_thr for _ in range(num_imgs)],\n                    [area_ranges for _ in range(num_imgs)],\n                    [use_legacy_coordinate for _ in range(num_imgs)], *args))\n        else:\n            tpfp = tpfp_fn(\n                cls_dets[0],\n                cls_gts[0],\n                cls_gts_ignore[0],\n                iou_thr,\n                area_ranges,\n                use_legacy_coordinate,\n                gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0]\n                                    if use_group_of else None),\n                use_group_of=use_group_of,\n                ioa_thr=ioa_thr)\n            tpfp = [tpfp]\n\n        if use_group_of:\n            tp, fp, cls_dets = tuple(zip(*tpfp))\n        else:\n            tp, fp = tuple(zip(*tpfp))\n        # calculate gt number of each scale\n        # ignored gts or gts beyond the specific scale are not counted\n        num_gts = np.zeros(num_scales, dtype=int)\n        for j, bbox in enumerate(cls_gts):\n            if area_ranges is None:\n                num_gts[0] += bbox.shape[0]\n            else:\n                gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * (\n                    bbox[:, 3] - bbox[:, 1] + extra_length)\n                for k, (min_area, max_area) in enumerate(area_ranges):\n                    num_gts[k] += np.sum((gt_areas >= min_area)\n                                         
& (gt_areas < max_area))\n        # sort all det bboxes by score, also sort tp and fp\n        cls_dets = np.vstack(cls_dets)\n        num_dets = cls_dets.shape[0]\n        sort_inds = np.argsort(-cls_dets[:, -1])\n        tp = np.hstack(tp)[:, sort_inds]\n        fp = np.hstack(fp)[:, sort_inds]\n        # calculate recall and precision with tp and fp\n        tp = np.cumsum(tp, axis=1)\n        fp = np.cumsum(fp, axis=1)\n        eps = np.finfo(np.float32).eps\n        recalls = tp / np.maximum(num_gts[:, np.newaxis], eps)\n        precisions = tp / np.maximum((tp + fp), eps)\n        # calculate AP\n        if scale_ranges is None:\n            recalls = recalls[0, :]\n            precisions = precisions[0, :]\n            num_gts = num_gts.item()\n        mode = 'area' if dataset != 'voc07' else '11points'\n        ap = average_precision(recalls, precisions, mode)\n        eval_results.append({\n            'num_gts': num_gts,\n            'num_dets': num_dets,\n            'recall': recalls,\n            'precision': precisions,\n            'ap': ap\n        })\n\n    if num_imgs > 1:\n        pool.close()\n\n    if scale_ranges is not None:\n        # shape (num_classes, num_scales)\n        all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results])\n        all_num_gts = np.vstack(\n            [cls_result['num_gts'] for cls_result in eval_results])\n        mean_ap = []\n        for i in range(num_scales):\n            if np.any(all_num_gts[:, i] > 0):\n                mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean())\n            else:\n                mean_ap.append(0.0)\n    else:\n        aps = []\n        for cls_result in eval_results:\n            if cls_result['num_gts'] > 0:\n                aps.append(cls_result['ap'])\n        mean_ap = np.array(aps).mean().item() if aps else 0.0\n\n    print_map_summary(\n        mean_ap, eval_results, dataset, area_ranges, logger=logger)\n\n    return mean_ap, eval_results\n\n\ndef print_map_summary(mean_ap,\n                      results,\n                      dataset=None,\n                      scale_ranges=None,\n                      logger=None):\n    \"\"\"Print mAP and results of each class.\n\n    A table will be printed to show the gts/dets/recall/AP of each class and\n    the mAP.\n\n    Args:\n        mean_ap (float): Calculated from `eval_map()`.\n        results (list[dict]): Calculated from `eval_map()`.\n        dataset (list[str] | str | None): Dataset name or dataset classes.\n        scale_ranges (list[tuple] | None): Range of scales to be evaluated.\n        logger (logging.Logger | str | None): The way to print the mAP\n            summary. See `mmcv.utils.print_log()` for details. 
Default: None.\n    \"\"\"\n\n    if logger == 'silent':\n        return\n\n    if isinstance(results[0]['ap'], np.ndarray):\n        num_scales = len(results[0]['ap'])\n    else:\n        num_scales = 1\n\n    if scale_ranges is not None:\n        assert len(scale_ranges) == num_scales\n\n    num_classes = len(results)\n\n    recalls = np.zeros((num_scales, num_classes), dtype=np.float32)\n    aps = np.zeros((num_scales, num_classes), dtype=np.float32)\n    num_gts = np.zeros((num_scales, num_classes), dtype=int)\n    for i, cls_result in enumerate(results):\n        if cls_result['recall'].size > 0:\n            recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1]\n        aps[:, i] = cls_result['ap']\n        num_gts[:, i] = cls_result['num_gts']\n\n    if dataset is None:\n        label_names = [str(i) for i in range(num_classes)]\n    elif mmcv.is_str(dataset):\n        label_names = get_classes(dataset)\n    else:\n        label_names = dataset\n\n    if not isinstance(mean_ap, list):\n        mean_ap = [mean_ap]\n\n    header = ['class', 'gts', 'dets', 'recall', 'ap']\n    for i in range(num_scales):\n        if scale_ranges is not None:\n            print_log(f'Scale range {scale_ranges[i]}', logger=logger)\n        table_data = [header]\n        for j in range(num_classes):\n            row_data = [\n                label_names[j], num_gts[i, j], results[j]['num_dets'],\n                f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}'\n            ]\n            table_data.append(row_data)\n        table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}'])\n        table = AsciiTable(table_data)\n        table.inner_footing_row_border = True\n        print_log('\\n' + table.table, logger=logger)\n"
  },
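  {
    "path": "examples/hypothetical_eval_map_usage.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not\n# part of the upstream repository. It shows one way eval_map() from\n# mmdet/core/evaluation/mean_ap.py can be called on toy, hand-written\n# detections and annotations (all numbers below are made up).\nimport numpy as np\n\nfrom mmdet.core.evaluation.mean_ap import eval_map\n\n# One image, one class: each per-class detection array is (n, 5) with\n# [x1, y1, x2, y2, score] rows.\ndet_results = [[\n    np.array([[10., 10., 50., 50., 0.9],\n              [60., 60., 90., 90., 0.3]], dtype=np.float32)\n]]\n# Matching ground truth for that image: 'bboxes' is (m, 4), 'labels' is (m,).\nannotations = [\n    dict(\n        bboxes=np.array([[12., 12., 48., 48.]], dtype=np.float32),\n        labels=np.array([0], dtype=np.int64))\n]\n\nmean_ap, per_class_results = eval_map(det_results, annotations, iou_thr=0.5)\nprint('mAP:', mean_ap)\n"
  },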
  {
    "path": "mmdet/core/evaluation/panoptic_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# A custom value to distinguish instance ID and category ID; need to\n# be greater than the number of categories.\n# For a pixel in the panoptic result map:\n#   pan_id = ins_id * INSTANCE_OFFSET + cat_id\nINSTANCE_OFFSET = 1000\n"
  },
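  {
    "path": "examples/hypothetical_panoptic_id_example.py",
    "content": "# NOTE: Hypothetical illustration added for clarity; this file is not part of\n# the upstream repository. It spells out the pan_id encoding documented in\n# mmdet/core/evaluation/panoptic_utils.py.\nfrom mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET\n\ncat_id = 17  # category index of a pixel\nins_id = 3   # instance index of the same pixel\n\n# pan_id = ins_id * INSTANCE_OFFSET + cat_id, as described in the module above.\npan_id = ins_id * INSTANCE_OFFSET + cat_id\n\n# Because INSTANCE_OFFSET is larger than the number of categories, both ids\n# can be recovered from the packed value.\nassert pan_id % INSTANCE_OFFSET == cat_id\nassert pan_id // INSTANCE_OFFSET == ins_id\n"
  },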
  {
    "path": "mmdet/core/evaluation/recall.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Sequence\n\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom .bbox_overlaps import bbox_overlaps\n\n\ndef _recalls(all_ious, proposal_nums, thrs):\n\n    img_num = all_ious.shape[0]\n    total_gt_num = sum([ious.shape[0] for ious in all_ious])\n\n    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)\n    for k, proposal_num in enumerate(proposal_nums):\n        tmp_ious = np.zeros(0)\n        for i in range(img_num):\n            ious = all_ious[i][:, :proposal_num].copy()\n            gt_ious = np.zeros((ious.shape[0]))\n            if ious.size == 0:\n                tmp_ious = np.hstack((tmp_ious, gt_ious))\n                continue\n            for j in range(ious.shape[0]):\n                gt_max_overlaps = ious.argmax(axis=1)\n                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]\n                gt_idx = max_ious.argmax()\n                gt_ious[j] = max_ious[gt_idx]\n                box_idx = gt_max_overlaps[gt_idx]\n                ious[gt_idx, :] = -1\n                ious[:, box_idx] = -1\n            tmp_ious = np.hstack((tmp_ious, gt_ious))\n        _ious[k, :] = tmp_ious\n\n    _ious = np.fliplr(np.sort(_ious, axis=1))\n    recalls = np.zeros((proposal_nums.size, thrs.size))\n    for i, thr in enumerate(thrs):\n        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)\n\n    return recalls\n\n\ndef set_recall_param(proposal_nums, iou_thrs):\n    \"\"\"Check proposal_nums and iou_thrs and set correct format.\"\"\"\n    if isinstance(proposal_nums, Sequence):\n        _proposal_nums = np.array(proposal_nums)\n    elif isinstance(proposal_nums, int):\n        _proposal_nums = np.array([proposal_nums])\n    else:\n        _proposal_nums = proposal_nums\n\n    if iou_thrs is None:\n        _iou_thrs = np.array([0.5])\n    elif isinstance(iou_thrs, Sequence):\n        _iou_thrs = np.array(iou_thrs)\n    elif isinstance(iou_thrs, float):\n        _iou_thrs = np.array([iou_thrs])\n    else:\n        _iou_thrs = iou_thrs\n\n    return _proposal_nums, _iou_thrs\n\n\ndef eval_recalls(gts,\n                 proposals,\n                 proposal_nums=None,\n                 iou_thrs=0.5,\n                 logger=None,\n                 use_legacy_coordinate=False):\n    \"\"\"Calculate recalls.\n\n    Args:\n        gts (list[ndarray]): a list of arrays of shape (n, 4)\n        proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)\n        proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.\n        iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.\n        logger (logging.Logger | str | None): The way to print the recall\n            summary. See `mmcv.utils.print_log()` for details. Default: None.\n        use_legacy_coordinate (bool): Whether use coordinate system\n            in mmdet v1.x. \"1\" was added to both height and width\n            which means w, h should be\n            computed as 'x2 - x1 + 1` and 'y2 - y1 + 1'. 
Default: False.\n\n\n    Returns:\n        ndarray: recalls of different ious and proposal nums\n    \"\"\"\n\n    img_num = len(gts)\n    assert img_num == len(proposals)\n    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)\n    all_ious = []\n    for i in range(img_num):\n        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:\n            scores = proposals[i][:, 4]\n            sort_idx = np.argsort(scores)[::-1]\n            img_proposal = proposals[i][sort_idx, :]\n        else:\n            img_proposal = proposals[i]\n        prop_num = min(img_proposal.shape[0], proposal_nums[-1])\n        if gts[i] is None or gts[i].shape[0] == 0:\n            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)\n        else:\n            ious = bbox_overlaps(\n                gts[i],\n                img_proposal[:prop_num, :4],\n                use_legacy_coordinate=use_legacy_coordinate)\n        all_ious.append(ious)\n    all_ious = np.array(all_ious)\n    recalls = _recalls(all_ious, proposal_nums, iou_thrs)\n\n    print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)\n    return recalls\n\n\ndef print_recall_summary(recalls,\n                         proposal_nums,\n                         iou_thrs,\n                         row_idxs=None,\n                         col_idxs=None,\n                         logger=None):\n    \"\"\"Print recalls in a table.\n\n    Args:\n        recalls (ndarray): calculated from `bbox_recalls`\n        proposal_nums (ndarray or list): top N proposals\n        iou_thrs (ndarray or list): iou thresholds\n        row_idxs (ndarray): which rows(proposal nums) to print\n        col_idxs (ndarray): which cols(iou thresholds) to print\n        logger (logging.Logger | str | None): The way to print the recall\n            summary. See `mmcv.utils.print_log()` for details. 
Default: None.\n    \"\"\"\n    proposal_nums = np.array(proposal_nums, dtype=np.int32)\n    iou_thrs = np.array(iou_thrs)\n    if row_idxs is None:\n        row_idxs = np.arange(proposal_nums.size)\n    if col_idxs is None:\n        col_idxs = np.arange(iou_thrs.size)\n    row_header = [''] + iou_thrs[col_idxs].tolist()\n    table_data = [row_header]\n    for i, num in enumerate(proposal_nums[row_idxs]):\n        row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]\n        row.insert(0, num)\n        table_data.append(row)\n    table = AsciiTable(table_data)\n    print_log('\\n' + table.table, logger=logger)\n\n\ndef plot_num_recall(recalls, proposal_nums):\n    \"\"\"Plot Proposal_num-Recalls curve.\n\n    Args:\n        recalls(ndarray or list): shape (k,)\n        proposal_nums(ndarray or list): same shape as `recalls`\n    \"\"\"\n    if isinstance(proposal_nums, np.ndarray):\n        _proposal_nums = proposal_nums.tolist()\n    else:\n        _proposal_nums = proposal_nums\n    if isinstance(recalls, np.ndarray):\n        _recalls = recalls.tolist()\n    else:\n        _recalls = recalls\n\n    import matplotlib.pyplot as plt\n    f = plt.figure()\n    plt.plot([0] + _proposal_nums, [0] + _recalls)\n    plt.xlabel('Proposal num')\n    plt.ylabel('Recall')\n    plt.axis([0, proposal_nums.max(), 0, 1])\n    f.show()\n\n\ndef plot_iou_recall(recalls, iou_thrs):\n    \"\"\"Plot IoU-Recalls curve.\n\n    Args:\n        recalls(ndarray or list): shape (k,)\n        iou_thrs(ndarray or list): same shape as `recalls`\n    \"\"\"\n    if isinstance(iou_thrs, np.ndarray):\n        _iou_thrs = iou_thrs.tolist()\n    else:\n        _iou_thrs = iou_thrs\n    if isinstance(recalls, np.ndarray):\n        _recalls = recalls.tolist()\n    else:\n        _recalls = recalls\n\n    import matplotlib.pyplot as plt\n    f = plt.figure()\n    plt.plot(_iou_thrs + [1.0], _recalls + [0.])\n    plt.xlabel('IoU')\n    plt.ylabel('Recall')\n    plt.axis([iou_thrs.min(), 1, 0, 1])\n    f.show()\n"
  },
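  {
    "path": "examples/hypothetical_eval_recalls_usage.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not\n# part of the upstream repository. It calls eval_recalls() from\n# mmdet/core/evaluation/recall.py on toy ground-truth boxes and scored\n# proposals (all numbers are made up).\nimport numpy as np\n\nfrom mmdet.core.evaluation.recall import eval_recalls\n\n# Two images: gts[i] is (n, 4); proposals[i] is (k, 5) with a score column.\ngts = [\n    np.array([[10., 10., 50., 50.]], dtype=np.float32),\n    np.array([[20., 20., 80., 80.]], dtype=np.float32),\n]\nproposals = [\n    np.array([[12., 12., 48., 48., 0.9],\n              [60., 60., 90., 90., 0.2]], dtype=np.float32),\n    np.array([[22., 18., 78., 82., 0.8],\n              [0., 0., 10., 10., 0.4]], dtype=np.float32),\n]\n\n# Recall of the top-1 and top-2 proposals at IoU thresholds 0.5 and 0.75;\n# the result has shape (num_proposal_nums, num_iou_thrs).\nrecalls = eval_recalls(gts, proposals, proposal_nums=[1, 2], iou_thrs=[0.5, 0.75])\nprint(recalls)\n"
  },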
  {
    "path": "mmdet/core/export/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,\n                          get_k_for_topk)\nfrom .pytorch2onnx import (build_model_from_cfg,\n                           generate_inputs_and_wrap_model,\n                           preprocess_example_input)\n\n__all__ = [\n    'build_model_from_cfg', 'generate_inputs_and_wrap_model',\n    'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx',\n    'dynamic_clip_for_onnx'\n]\n"
  },
  {
    "path": "mmdet/core/export/model_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport warnings\n\nimport numpy as np\nimport torch\n\nfrom mmdet.core import bbox2result\nfrom mmdet.models import BaseDetector\n\n\nclass DeployBaseDetector(BaseDetector):\n    \"\"\"DeployBaseDetector.\"\"\"\n\n    def __init__(self, class_names, device_id):\n        super(DeployBaseDetector, self).__init__()\n        self.CLASSES = class_names\n        self.device_id = device_id\n\n    def simple_test(self, img, img_metas, **kwargs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def aug_test(self, imgs, img_metas, **kwargs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def extract_feat(self, imgs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def forward_train(self, imgs, img_metas, **kwargs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def val_step(self, data, optimizer):\n        raise NotImplementedError('This method is not implemented.')\n\n    def train_step(self, data, optimizer):\n        raise NotImplementedError('This method is not implemented.')\n\n    def forward_test(self, *, img, img_metas, **kwargs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def async_simple_test(self, img, img_metas, **kwargs):\n        raise NotImplementedError('This method is not implemented.')\n\n    def forward(self, img, img_metas, return_loss=True, **kwargs):\n        outputs = self.forward_test(img, img_metas, **kwargs)\n        batch_dets, batch_labels = outputs[:2]\n        batch_masks = outputs[2] if len(outputs) == 3 else None\n        batch_size = img[0].shape[0]\n        img_metas = img_metas[0]\n        results = []\n        rescale = kwargs.get('rescale', True)\n        for i in range(batch_size):\n            dets, labels = batch_dets[i], batch_labels[i]\n            if rescale:\n                scale_factor = img_metas[i]['scale_factor']\n\n                if isinstance(scale_factor, (list, tuple, np.ndarray)):\n                    assert len(scale_factor) == 4\n                    scale_factor = np.array(scale_factor)[None, :]  # [1,4]\n                dets[:, :4] /= scale_factor\n\n            if 'border' in img_metas[i]:\n                # offset pixel of the top-left corners between original image\n                # and padded/enlarged image, 'border' is used when exporting\n                # CornerNet and CentripetalNet to onnx\n                x_off = img_metas[i]['border'][2]\n                y_off = img_metas[i]['border'][0]\n                dets[:, [0, 2]] -= x_off\n                dets[:, [1, 3]] -= y_off\n                dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype)\n\n            dets_results = bbox2result(dets, labels, len(self.CLASSES))\n\n            if batch_masks is not None:\n                masks = batch_masks[i]\n                img_h, img_w = img_metas[i]['img_shape'][:2]\n                ori_h, ori_w = img_metas[i]['ori_shape'][:2]\n                masks = masks[:, :img_h, :img_w]\n                if rescale:\n                    masks = masks.astype(np.float32)\n                    masks = torch.from_numpy(masks)\n                    masks = torch.nn.functional.interpolate(\n                        masks.unsqueeze(0), size=(ori_h, ori_w))\n                    masks = masks.squeeze(0).detach().numpy()\n                if masks.dtype != bool:\n                    masks = masks >= 0.5\n                segms_results 
= [[] for _ in range(len(self.CLASSES))]\n                for j in range(len(dets)):\n                    segms_results[labels[j]].append(masks[j])\n                results.append((dets_results, segms_results))\n            else:\n                results.append(dets_results)\n        return results\n\n\nclass ONNXRuntimeDetector(DeployBaseDetector):\n    \"\"\"Wrapper for detector's inference with ONNXRuntime.\"\"\"\n\n    def __init__(self, onnx_file, class_names, device_id):\n        super(ONNXRuntimeDetector, self).__init__(class_names, device_id)\n        import onnxruntime as ort\n\n        # get the custom op path\n        ort_custom_op_path = ''\n        try:\n            from mmcv.ops import get_onnxruntime_op_path\n            ort_custom_op_path = get_onnxruntime_op_path()\n        except (ImportError, ModuleNotFoundError):\n            warnings.warn('If input model has custom op from mmcv, \\\n                you may have to build mmcv with ONNXRuntime from source.')\n        session_options = ort.SessionOptions()\n        # register custom op for onnxruntime\n        if osp.exists(ort_custom_op_path):\n            session_options.register_custom_ops_library(ort_custom_op_path)\n        sess = ort.InferenceSession(onnx_file, session_options)\n        providers = ['CPUExecutionProvider']\n        options = [{}]\n        is_cuda_available = ort.get_device() == 'GPU'\n        if is_cuda_available:\n            providers.insert(0, 'CUDAExecutionProvider')\n            options.insert(0, {'device_id': device_id})\n\n        sess.set_providers(providers, options)\n\n        self.sess = sess\n        self.io_binding = sess.io_binding()\n        self.output_names = [_.name for _ in sess.get_outputs()]\n        self.is_cuda_available = is_cuda_available\n\n    def forward_test(self, imgs, img_metas, **kwargs):\n        input_data = imgs[0]\n        # set io binding for inputs/outputs\n        device_type = 'cuda' if self.is_cuda_available else 'cpu'\n        if not self.is_cuda_available:\n            input_data = input_data.cpu()\n        self.io_binding.bind_input(\n            name='input',\n            device_type=device_type,\n            device_id=self.device_id,\n            element_type=np.float32,\n            shape=input_data.shape,\n            buffer_ptr=input_data.data_ptr())\n\n        for name in self.output_names:\n            self.io_binding.bind_output(name)\n        # run session to get outputs\n        self.sess.run_with_iobinding(self.io_binding)\n        ort_outputs = self.io_binding.copy_outputs_to_cpu()\n        return ort_outputs\n\n\nclass TensorRTDetector(DeployBaseDetector):\n    \"\"\"Wrapper for detector's inference with TensorRT.\"\"\"\n\n    def __init__(self, engine_file, class_names, device_id, output_names=None):\n        super(TensorRTDetector, self).__init__(class_names, device_id)\n        warnings.warn('`output_names` is deprecated and will be removed in '\n                      'future releases.')\n        from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin\n        try:\n            load_tensorrt_plugin()\n        except (ImportError, ModuleNotFoundError):\n            warnings.warn('If input model has custom op from mmcv, \\\n                you may have to build mmcv with TensorRT from source.')\n\n        output_names = ['dets', 'labels']\n        model = TRTWraper(engine_file, ['input'], output_names)\n        with_masks = False\n        # if TensorRT has totally 4 inputs/outputs, then\n        # the detector should have `mask` output.\n   
     if len(model.engine) == 4:\n            model.output_names = output_names + ['masks']\n            with_masks = True\n        self.model = model\n        self.with_masks = with_masks\n\n    def forward_test(self, imgs, img_metas, **kwargs):\n        input_data = imgs[0].contiguous()\n        with torch.cuda.device(self.device_id), torch.no_grad():\n            outputs = self.model({'input': input_data})\n            outputs = [outputs[name] for name in self.model.output_names]\n        outputs = [out.detach().cpu().numpy() for out in outputs]\n        return outputs\n"
  },
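  {
    "path": "examples/hypothetical_onnxruntime_detector_usage.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not\n# part of the upstream repository. It shows how the ONNXRuntimeDetector\n# wrapper from mmdet/core/export/model_wrappers.py might be driven once a\n# detector has been exported to ONNX. The model path, class names and the\n# hand-built meta dict are placeholders, and onnxruntime must be installed.\nimport numpy as np\nimport torch\n\nfrom mmdet.core.export.model_wrappers import ONNXRuntimeDetector\n\ndetector = ONNXRuntimeDetector(\n    'model.onnx', class_names=('cat', 'dog'), device_id=0)\n\n# A dummy 1x3x224x224 input plus the minimal meta keys used by forward();\n# a real pipeline would produce these via the test-time transforms.\nimg = torch.randn(1, 3, 224, 224)\nimg_metas = [[\n    dict(\n        img_shape=(224, 224, 3),\n        ori_shape=(224, 224, 3),\n        scale_factor=np.ones(4, dtype=np.float32))\n]]\n\nwith torch.no_grad():\n    results = detector(img=[img], img_metas=img_metas, return_loss=False)\n# results[0] is a per-class list of (n, 5) arrays, as returned by bbox2result.\n"
  },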
  {
    "path": "mmdet/core/export/onnx_helper.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\n\nimport torch\n\n\ndef dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):\n    \"\"\"Clip boxes dynamically for onnx.\n\n    Since torch.clamp cannot have dynamic `min` and `max`, we scale the\n      boxes by 1/max_shape and clamp in the range [0, 1].\n\n    Args:\n        x1 (Tensor): The x1 for bounding boxes.\n        y1 (Tensor): The y1 for bounding boxes.\n        x2 (Tensor): The x2 for bounding boxes.\n        y2 (Tensor): The y2 for bounding boxes.\n        max_shape (Tensor or torch.Size): The (H,W) of original image.\n    Returns:\n        tuple(Tensor): The clipped x1, y1, x2, y2.\n    \"\"\"\n    assert isinstance(\n        max_shape,\n        torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx'\n\n    # scale by 1/max_shape\n    x1 = x1 / max_shape[1]\n    y1 = y1 / max_shape[0]\n    x2 = x2 / max_shape[1]\n    y2 = y2 / max_shape[0]\n\n    # clamp [0, 1]\n    x1 = torch.clamp(x1, 0, 1)\n    y1 = torch.clamp(y1, 0, 1)\n    x2 = torch.clamp(x2, 0, 1)\n    y2 = torch.clamp(y2, 0, 1)\n\n    # scale back\n    x1 = x1 * max_shape[1]\n    y1 = y1 * max_shape[0]\n    x2 = x2 * max_shape[1]\n    y2 = y2 * max_shape[0]\n    return x1, y1, x2, y2\n\n\ndef get_k_for_topk(k, size):\n    \"\"\"Get k of TopK for onnx exporting.\n\n    The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime\n      it could be a Tensor.Due to dynamic shape feature, we have to decide\n      whether to do TopK and what K it should be while exporting to ONNX.\n    If returned K is less than zero, it means we do not have to do\n      TopK operation.\n\n    Args:\n        k (int or Tensor): The set k value for nms from config file.\n        size (Tensor or torch.Size): The number of elements of \\\n            TopK's input tensor\n    Returns:\n        tuple: (int or Tensor): The final K for TopK.\n    \"\"\"\n    ret_k = -1\n    if k <= 0 or size <= 0:\n        return ret_k\n    if torch.onnx.is_in_onnx_export():\n        is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'\n        if is_trt_backend:\n            # TensorRT does not support dynamic K with TopK op\n            if 0 < k < size:\n                ret_k = k\n        else:\n            # Always keep topk op for dynamic input in onnx for ONNX Runtime\n            ret_k = torch.where(k < size, k, size)\n    elif k < size:\n        ret_k = k\n    else:\n        # ret_k is -1\n        pass\n    return ret_k\n\n\ndef add_dummy_nms_for_onnx(boxes,\n                           scores,\n                           max_output_boxes_per_class=1000,\n                           iou_threshold=0.5,\n                           score_threshold=0.05,\n                           pre_top_k=-1,\n                           after_top_k=-1,\n                           labels=None):\n    \"\"\"Create a dummy onnx::NonMaxSuppression op while exporting to ONNX.\n\n    This function helps exporting to onnx with batch and multiclass NMS op.\n    It only supports class-agnostic detection results. That is, the scores\n    is of shape (N, num_bboxes, num_classes) and the boxes is of shape\n    (N, num_boxes, 4).\n\n    Args:\n        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4]\n        scores (Tensor): The detection scores of shape\n            [N, num_boxes, num_classes]\n        max_output_boxes_per_class (int): Maximum number of output\n            boxes per class of nms. Defaults to 1000.\n        iou_threshold (float): IOU threshold of nms. 
Defaults to 0.5\n        score_threshold (float): score threshold of nms.\n            Defaults to 0.05.\n        pre_top_k (bool): Number of top K boxes to keep before nms.\n            Defaults to -1.\n        after_top_k (int): Number of top K boxes to keep after nms.\n            Defaults to -1.\n        labels (Tensor, optional): It not None, explicit labels would be used.\n            Otherwise, labels would be automatically generated using\n            num_classed. Defaults to None.\n\n    Returns:\n        tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n            and class labels of shape [N, num_det].\n    \"\"\"\n    max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class])\n    iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32)\n    score_threshold = torch.tensor([score_threshold], dtype=torch.float32)\n    batch_size = scores.shape[0]\n    num_class = scores.shape[2]\n\n    nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long)\n    nms_pre = get_k_for_topk(nms_pre, boxes.shape[1])\n\n    if nms_pre > 0:\n        max_scores, _ = scores.max(-1)\n        _, topk_inds = max_scores.topk(nms_pre)\n        batch_inds = torch.arange(batch_size).view(\n            -1, 1).expand_as(topk_inds).long()\n        # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501\n        transformed_inds = boxes.shape[1] * batch_inds + topk_inds\n        boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape(\n            batch_size, -1, 4)\n        scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape(\n            batch_size, -1, num_class)\n        if labels is not None:\n            labels = labels.reshape(-1, 1)[transformed_inds].reshape(\n                batch_size, -1)\n\n    scores = scores.permute(0, 2, 1)\n    num_box = boxes.shape[1]\n    # turn off tracing to create a dummy output of nms\n    state = torch._C._get_tracing_state()\n    # dummy indices of nms's output\n    num_fake_det = 2\n    batch_inds = torch.randint(batch_size, (num_fake_det, 1))\n    cls_inds = torch.randint(num_class, (num_fake_det, 1))\n    box_inds = torch.randint(num_box, (num_fake_det, 1))\n    indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1)\n    output = indices\n    setattr(DummyONNXNMSop, 'output', output)\n\n    # open tracing\n    torch._C._set_tracing_state(state)\n    selected_indices = DummyONNXNMSop.apply(boxes, scores,\n                                            max_output_boxes_per_class,\n                                            iou_threshold, score_threshold)\n\n    batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1]\n    box_inds = selected_indices[:, 2]\n    if labels is None:\n        labels = torch.arange(num_class, dtype=torch.long).to(scores.device)\n        labels = labels.view(1, num_class, 1).expand_as(scores)\n    scores = scores.reshape(-1, 1)\n    boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4)\n    pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds\n    mask = scores.new_zeros(scores.shape)\n    # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501\n    # PyTorch style code: mask[batch_inds, box_inds] += 1\n    mask[pos_inds, :] += 1\n    scores = scores * mask\n    boxes = boxes * mask\n\n    scores = scores.reshape(batch_size, -1)\n    boxes = boxes.reshape(batch_size, -1, 4)\n    labels = labels.reshape(batch_size, -1)\n\n    nms_after = torch.tensor(\n        
after_top_k, device=scores.device, dtype=torch.long)\n    nms_after = get_k_for_topk(nms_after, num_box * num_class)\n\n    if nms_after > 0:\n        _, topk_inds = scores.topk(nms_after)\n        batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds)\n        # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501\n        transformed_inds = scores.shape[1] * batch_inds + topk_inds\n        scores = scores.reshape(-1, 1)[transformed_inds, :].reshape(\n            batch_size, -1)\n        boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape(\n            batch_size, -1, 4)\n        labels = labels.reshape(-1, 1)[transformed_inds, :].reshape(\n            batch_size, -1)\n\n    scores = scores.unsqueeze(2)\n    dets = torch.cat([boxes, scores], dim=2)\n    return dets, labels\n\n\nclass DummyONNXNMSop(torch.autograd.Function):\n    \"\"\"DummyONNXNMSop.\n\n    This class is only for creating onnx::NonMaxSuppression.\n    \"\"\"\n\n    @staticmethod\n    def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold,\n                score_threshold):\n\n        return DummyONNXNMSop.output\n\n    @staticmethod\n    def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold,\n                 score_threshold):\n        return g.op(\n            'NonMaxSuppression',\n            boxes,\n            scores,\n            max_output_boxes_per_class,\n            iou_threshold,\n            score_threshold,\n            outputs=1)\n"
  },
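  {
    "path": "examples/hypothetical_dynamic_clip_example.py",
    "content": "# NOTE: Hypothetical illustration added for clarity; this file is not part of\n# the upstream repository. It exercises dynamic_clip_for_onnx() from\n# mmdet/core/export/onnx_helper.py on plain tensors to show the\n# scale-clamp-rescale trick; the box coordinates are made up.\nimport torch\n\nfrom mmdet.core.export.onnx_helper import dynamic_clip_for_onnx\n\n# Boxes that spill outside a 100 x 200 (H, W) image.\nx1 = torch.tensor([-5., 10.])\ny1 = torch.tensor([-3., 20.])\nx2 = torch.tensor([250., 80.])\ny2 = torch.tensor([120., 90.])\nmax_shape = torch.tensor([100, 200])  # (H, W); must be a tensor here\n\nx1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)\n# Coordinates are now clamped to [0, W] and [0, H] respectively.\nprint(x1, y1, x2, y2)\n"
  },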
  {
    "path": "mmdet/core/export/pytorch2onnx.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.runner import load_checkpoint\n\n\ndef generate_inputs_and_wrap_model(config_path,\n                                   checkpoint_path,\n                                   input_config,\n                                   cfg_options=None):\n    \"\"\"Prepare sample input and wrap model for ONNX export.\n\n    The ONNX export API only accept args, and all inputs should be\n    torch.Tensor or corresponding types (such as tuple of tensor).\n    So we should call this function before exporting. This function will:\n\n    1. generate corresponding inputs which are used to execute the model.\n    2. Wrap the model's forward function.\n\n    For example, the MMDet models' forward function has a parameter\n    ``return_loss:bool``. As we want to set it as False while export API\n    supports neither bool type or kwargs. So we have to replace the forward\n    method like ``model.forward = partial(model.forward, return_loss=False)``.\n\n    Args:\n        config_path (str): the OpenMMLab config for the model we want to\n            export to ONNX\n        checkpoint_path (str): Path to the corresponding checkpoint\n        input_config (dict): the exactly data in this dict depends on the\n            framework. For MMSeg, we can just declare the input shape,\n            and generate the dummy data accordingly. However, for MMDet,\n            we may pass the real img path, or the NMS will return None\n            as there is no legal bbox.\n\n    Returns:\n        tuple: (model, tensor_data) wrapped model which can be called by\n            ``model(*tensor_data)`` and a list of inputs which are used to\n            execute the model while exporting.\n    \"\"\"\n\n    model = build_model_from_cfg(\n        config_path, checkpoint_path, cfg_options=cfg_options)\n    one_img, one_meta = preprocess_example_input(input_config)\n    tensor_data = [one_img]\n    model.forward = partial(\n        model.forward, img_metas=[[one_meta]], return_loss=False)\n\n    # pytorch has some bug in pytorch1.3, we have to fix it\n    # by replacing these existing op\n    opset_version = 11\n    # put the import within the function thus it will not cause import error\n    # when not using this function\n    try:\n        from mmcv.onnx.symbolic import register_extra_symbolics\n    except ModuleNotFoundError:\n        raise NotImplementedError('please update mmcv to version>=v1.0.4')\n    register_extra_symbolics(opset_version)\n\n    return model, tensor_data\n\n\ndef build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):\n    \"\"\"Build a model from config and load the given checkpoint.\n\n    Args:\n        config_path (str): the OpenMMLab config for the model we want to\n            export to ONNX\n        checkpoint_path (str): Path to the corresponding checkpoint\n\n    Returns:\n        torch.nn.Module: the built model\n    \"\"\"\n    from mmdet.models import build_detector\n\n    cfg = mmcv.Config.fromfile(config_path)\n    if cfg_options is not None:\n        cfg.merge_from_dict(cfg_options)\n    # set cudnn_benchmark\n    if cfg.get('cudnn_benchmark', False):\n        torch.backends.cudnn.benchmark = True\n    cfg.model.pretrained = None\n    cfg.data.test.test_mode = True\n\n    # build the model\n    cfg.model.train_cfg = None\n    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))\n    checkpoint = load_checkpoint(model, 
checkpoint_path, map_location='cpu')\n    if 'CLASSES' in checkpoint.get('meta', {}):\n        model.CLASSES = checkpoint['meta']['CLASSES']\n    else:\n        from mmdet.datasets import DATASETS\n        dataset = DATASETS.get(cfg.data.test['type'])\n        assert (dataset is not None)\n        model.CLASSES = dataset.CLASSES\n    model.cpu().eval()\n    return model\n\n\ndef preprocess_example_input(input_config):\n    \"\"\"Prepare an example input image for ``generate_inputs_and_wrap_model``.\n\n    Args:\n        input_config (dict): customized config describing the example input.\n\n    Returns:\n        tuple: (one_img, one_meta), tensor of the example input image and \\\n            meta information for the example input image.\n\n    Examples:\n        >>> from mmdet.core.export import preprocess_example_input\n        >>> input_config = {\n        >>>         'input_shape': (1,3,224,224),\n        >>>         'input_path': 'demo/demo.jpg',\n        >>>         'normalize_cfg': {\n        >>>             'mean': (123.675, 116.28, 103.53),\n        >>>             'std': (58.395, 57.12, 57.375)\n        >>>             }\n        >>>         }\n        >>> one_img, one_meta = preprocess_example_input(input_config)\n        >>> print(one_img.shape)\n        torch.Size([1, 3, 224, 224])\n        >>> print(one_meta)\n        {'img_shape': (224, 224, 3),\n        'ori_shape': (224, 224, 3),\n        'pad_shape': (224, 224, 3),\n        'filename': '<demo>.png',\n        'scale_factor': 1.0,\n        'flip': False}\n    \"\"\"\n    input_path = input_config['input_path']\n    input_shape = input_config['input_shape']\n    one_img = mmcv.imread(input_path)\n    one_img = mmcv.imresize(one_img, input_shape[2:][::-1])\n    show_img = one_img.copy()\n    if 'normalize_cfg' in input_config.keys():\n        normalize_cfg = input_config['normalize_cfg']\n        mean = np.array(normalize_cfg['mean'], dtype=np.float32)\n        std = np.array(normalize_cfg['std'], dtype=np.float32)\n        to_rgb = normalize_cfg.get('to_rgb', True)\n        one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb)\n    one_img = one_img.transpose(2, 0, 1)\n    one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_(\n        True)\n    (_, C, H, W) = input_shape\n    one_meta = {\n        'img_shape': (H, W, C),\n        'ori_shape': (H, W, C),\n        'pad_shape': (H, W, C),\n        'filename': '<demo>.png',\n        'scale_factor': np.ones(4, dtype=np.float32),\n        'flip': False,\n        'show_img': show_img,\n        'flip_direction': None\n    }\n\n    return one_img, one_meta\n"
  },
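  {
    "path": "examples/hypothetical_pytorch2onnx_usage.py",
    "content": "# NOTE: Hypothetical usage sketch added for illustration; this file is not\n# part of the upstream repository. It chains generate_inputs_and_wrap_model()\n# from mmdet/core/export/pytorch2onnx.py with torch.onnx.export; the config,\n# checkpoint and image paths below are placeholders.\nimport torch\n\nfrom mmdet.core.export import generate_inputs_and_wrap_model\n\ninput_config = {\n    'input_shape': (1, 3, 224, 224),\n    'input_path': 'demo/demo.jpg',\n    'normalize_cfg': {\n        'mean': (123.675, 116.28, 103.53),\n        'std': (58.395, 57.12, 57.375)\n    }\n}\n\n# Build the model, load the checkpoint and patch forward() for export.\nmodel, tensor_data = generate_inputs_and_wrap_model(\n    'path/to/config.py', 'path/to/checkpoint.pth', input_config)\n\ntorch.onnx.export(\n    model,\n    tuple(tensor_data),\n    'detector.onnx',\n    export_params=True,\n    do_constant_folding=True,\n    opset_version=11)\n"
  },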
  {
    "path": "mmdet/core/hook/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .checkloss_hook import CheckInvalidLossHook\nfrom .ema import ExpMomentumEMAHook, LinearMomentumEMAHook\nfrom .memory_profiler_hook import MemoryProfilerHook\nfrom .set_epoch_info_hook import SetEpochInfoHook\nfrom .sync_norm_hook import SyncNormHook\nfrom .sync_random_size_hook import SyncRandomSizeHook\nfrom .wandblogger_hook import MMDetWandbHook\nfrom .yolox_lrupdater_hook import YOLOXLrUpdaterHook\nfrom .yolox_mode_switch_hook import YOLOXModeSwitchHook\n\n__all__ = [\n    'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook',\n    'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook',\n    'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook',\n    'MMDetWandbHook'\n]\n"
  },
  {
    "path": "mmdet/core/hook/checkloss_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass CheckInvalidLossHook(Hook):\n    \"\"\"Check invalid loss hook.\n\n    This hook will regularly check whether the loss is valid\n    during training.\n\n    Args:\n        interval (int): Checking interval (every k iterations).\n            Default: 50.\n    \"\"\"\n\n    def __init__(self, interval=50):\n        self.interval = interval\n\n    def after_train_iter(self, runner):\n        if self.every_n_iters(runner, self.interval):\n            assert torch.isfinite(runner.outputs['loss']), \\\n                runner.logger.info('loss become infinite or NaN!')\n"
  },
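  {
    "path": "examples/hypothetical_checkloss_hook_config.py",
    "content": "# NOTE: Hypothetical config fragment added for illustration; this file is not\n# part of the upstream repository. Hooks registered with\n# @HOOKS.register_module(), such as CheckInvalidLossHook above, are typically\n# enabled through `custom_hooks` in an mmdet config.\ncustom_hooks = [\n    dict(type='CheckInvalidLossHook', interval=50),\n]\n"
  },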
  {
    "path": "mmdet/core/hook/ema.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\nclass BaseEMAHook(Hook):\n    \"\"\"Exponential Moving Average Hook.\n\n    Use Exponential Moving Average on all parameters of model in training\n    process. All parameters have a ema backup, which update by the formula\n    as below. EMAHook takes priority over EvalHook and CheckpointHook. Note,\n    the original model parameters are actually saved in ema field after train.\n\n    Args:\n        momentum (float): The momentum used for updating ema parameter.\n            Ema's parameter are updated with the formula:\n           `ema_param = (1-momentum) * ema_param + momentum * cur_param`.\n            Defaults to 0.0002.\n        skip_buffers (bool): Whether to skip the model buffers, such as\n            batchnorm running stats (running_mean, running_var), it does not\n            perform the ema operation. Default to False.\n        interval (int): Update ema parameter every interval iteration.\n            Defaults to 1.\n        resume_from (str, optional): The checkpoint path. Defaults to None.\n        momentum_fun (func, optional): The function to change momentum\n            during early iteration (also warmup) to help early training.\n            It uses `momentum` as a constant. Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 momentum=0.0002,\n                 interval=1,\n                 skip_buffers=False,\n                 resume_from=None,\n                 momentum_fun=None):\n        assert 0 < momentum < 1\n        self.momentum = momentum\n        self.skip_buffers = skip_buffers\n        self.interval = interval\n        self.checkpoint = resume_from\n        self.momentum_fun = momentum_fun\n\n    def before_run(self, runner):\n        \"\"\"To resume model with it's ema parameters more friendly.\n\n        Register ema parameter as ``named_buffer`` to model.\n        \"\"\"\n        model = runner.model\n        if is_module_wrapper(model):\n            model = model.module\n        self.param_ema_buffer = {}\n        if self.skip_buffers:\n            self.model_parameters = dict(model.named_parameters())\n        else:\n            self.model_parameters = model.state_dict()\n        for name, value in self.model_parameters.items():\n            # \".\" is not allowed in module's buffer name\n            buffer_name = f\"ema_{name.replace('.', '_')}\"\n            self.param_ema_buffer[name] = buffer_name\n            model.register_buffer(buffer_name, value.data.clone())\n        self.model_buffers = dict(model.named_buffers())\n        if self.checkpoint is not None:\n            runner.resume(self.checkpoint)\n\n    def get_momentum(self, runner):\n        return self.momentum_fun(runner.iter) if self.momentum_fun else \\\n                        self.momentum\n\n    def after_train_iter(self, runner):\n        \"\"\"Update ema parameter every self.interval iterations.\"\"\"\n        if (runner.iter + 1) % self.interval != 0:\n            return\n        momentum = self.get_momentum(runner)\n        for name, parameter in self.model_parameters.items():\n            # exclude num_tracking\n            if parameter.dtype.is_floating_point:\n                buffer_name = self.param_ema_buffer[name]\n                buffer_parameter = self.model_buffers[buffer_name]\n                buffer_parameter.mul_(1 - momentum).add_(\n                    parameter.data, 
alpha=momentum)\n\n    def after_train_epoch(self, runner):\n        \"\"\"We load parameter values from ema backup to model before the\n        EvalHook.\"\"\"\n        self._swap_ema_parameters()\n\n    def before_train_epoch(self, runner):\n        \"\"\"We recover model's parameter from ema backup after last epoch's\n        EvalHook.\"\"\"\n        self._swap_ema_parameters()\n\n    def _swap_ema_parameters(self):\n        \"\"\"Swap the parameter of model with parameter in ema_buffer.\"\"\"\n        for name, value in self.model_parameters.items():\n            temp = value.data.clone()\n            ema_buffer = self.model_buffers[self.param_ema_buffer[name]]\n            value.data.copy_(ema_buffer.data)\n            ema_buffer.data.copy_(temp)\n\n\n@HOOKS.register_module()\nclass ExpMomentumEMAHook(BaseEMAHook):\n    \"\"\"EMAHook using exponential momentum strategy.\n\n    Args:\n        total_iter (int): The total number of iterations of EMA momentum.\n           Defaults to 2000.\n    \"\"\"\n\n    def __init__(self, total_iter=2000, **kwargs):\n        super(ExpMomentumEMAHook, self).__init__(**kwargs)\n        self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-(\n            1 + x) / total_iter) + self.momentum\n\n\n@HOOKS.register_module()\nclass LinearMomentumEMAHook(BaseEMAHook):\n    \"\"\"EMAHook using linear momentum strategy.\n\n    Args:\n        warm_up (int): During first warm_up steps, we may use smaller decay\n            to update ema parameters more slowly. Defaults to 100.\n    \"\"\"\n\n    def __init__(self, warm_up=100, **kwargs):\n        super(LinearMomentumEMAHook, self).__init__(**kwargs)\n        self.momentum_fun = lambda x: min(self.momentum**self.interval,\n                                          (1 + x) / (warm_up + x))\n"
  },
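  {
    "path": "examples/hypothetical_ema_hook_config.py",
    "content": "# NOTE: Hypothetical config fragment added for illustration; this file is not\n# part of the upstream repository. ExpMomentumEMAHook from\n# mmdet/core/hook/ema.py is usually enabled via `custom_hooks` (as in the\n# YOLOX configs); the momentum, total_iter and priority values below are\n# illustrative.\ncustom_hooks = [\n    dict(\n        type='ExpMomentumEMAHook',\n        momentum=0.0002,\n        total_iter=2000,\n        resume_from=None,\n        priority=49),\n]\n"
  },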
  {
    "path": "mmdet/core/hook/memory_profiler_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass MemoryProfilerHook(Hook):\n    \"\"\"Memory profiler hook recording memory information including virtual\n    memory, swap memory, and the memory of the current process.\n\n    Args:\n        interval (int): Checking interval (every k iterations).\n            Default: 50.\n    \"\"\"\n\n    def __init__(self, interval=50):\n        try:\n            from psutil import swap_memory, virtual_memory\n            self._swap_memory = swap_memory\n            self._virtual_memory = virtual_memory\n        except ImportError:\n            raise ImportError('psutil is not installed, please install it by: '\n                              'pip install psutil')\n\n        try:\n            from memory_profiler import memory_usage\n            self._memory_usage = memory_usage\n        except ImportError:\n            raise ImportError(\n                'memory_profiler is not installed, please install it by: '\n                'pip install memory_profiler')\n\n        self.interval = interval\n\n    def after_iter(self, runner):\n        if self.every_n_iters(runner, self.interval):\n            # in Byte\n            virtual_memory = self._virtual_memory()\n            swap_memory = self._swap_memory()\n            # in MB\n            process_memory = self._memory_usage()[0]\n            factor = 1024 * 1024\n            runner.logger.info(\n                'Memory information '\n                'available_memory: '\n                f'{round(virtual_memory.available / factor)} MB, '\n                'used_memory: '\n                f'{round(virtual_memory.used / factor)} MB, '\n                f'memory_utilization: {virtual_memory.percent} %, '\n                'available_swap_memory: '\n                f'{round((swap_memory.total - swap_memory.used) / factor)}'\n                ' MB, '\n                f'used_swap_memory: {round(swap_memory.used / factor)} MB, '\n                f'swap_memory_utilization: {swap_memory.percent} %, '\n                'current_process_memory: '\n                f'{round(process_memory)} MB')\n"
  },
  {
    "path": "mmdet/core/hook/set_epoch_info_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass SetEpochInfoHook(Hook):\n    \"\"\"Set runner's epoch information to the model.\"\"\"\n\n    def before_train_epoch(self, runner):\n        epoch = runner.epoch\n        model = runner.model\n        if is_module_wrapper(model):\n            model = model.module\n        model.set_epoch(epoch)\n"
  },
  {
    "path": "mmdet/core/hook/sync_norm_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections import OrderedDict\n\nfrom mmcv.runner import get_dist_info\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom torch import nn\n\nfrom ..utils.dist_utils import all_reduce_dict\n\n\ndef get_norm_states(module):\n    async_norm_states = OrderedDict()\n    for name, child in module.named_modules():\n        if isinstance(child, nn.modules.batchnorm._NormBase):\n            for k, v in child.state_dict().items():\n                async_norm_states['.'.join([name, k])] = v\n    return async_norm_states\n\n\n@HOOKS.register_module()\nclass SyncNormHook(Hook):\n    \"\"\"Synchronize Norm states after training epoch, currently used in YOLOX.\n\n    Args:\n        num_last_epochs (int): The number of latter epochs in the end of the\n            training to switch to synchronizing norm interval. Default: 15.\n        interval (int): Synchronizing norm interval. Default: 1.\n    \"\"\"\n\n    def __init__(self, num_last_epochs=15, interval=1):\n        self.interval = interval\n        self.num_last_epochs = num_last_epochs\n\n    def before_train_epoch(self, runner):\n        epoch = runner.epoch\n        if (epoch + 1) == runner.max_epochs - self.num_last_epochs:\n            # Synchronize norm every epoch.\n            self.interval = 1\n\n    def after_train_epoch(self, runner):\n        \"\"\"Synchronizing norm.\"\"\"\n        epoch = runner.epoch\n        module = runner.model\n        if (epoch + 1) % self.interval == 0:\n            _, world_size = get_dist_info()\n            if world_size == 1:\n                return\n            norm_states = get_norm_states(module)\n            if len(norm_states) == 0:\n                return\n            norm_states = all_reduce_dict(norm_states, op='mean')\n            module.load_state_dict(norm_states, strict=False)\n"
  },
  {
    "path": "mmdet/core/hook/sync_random_size_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport random\nimport warnings\n\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom mmcv.runner.hooks import HOOKS, Hook\nfrom torch import distributed as dist\n\n\n@HOOKS.register_module()\nclass SyncRandomSizeHook(Hook):\n    \"\"\"Change and synchronize the random image size across ranks.\n    SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve\n    similar functions. Such as `dict(type='Resize', img_scale=[(448, 448),\n    (832, 832)], multiscale_mode='range', keep_ratio=True)`.\n\n    Note: Due to the multi-process dataloader, its behavior is different\n    from YOLOX's official implementation, the official is to change the\n    size every fixed iteration interval and what we achieved is a fixed\n    epoch interval.\n\n    Args:\n        ratio_range (tuple[int]): Random ratio range. It will be multiplied\n            by 32, and then change the dataset output image size.\n            Default: (14, 26).\n        img_scale (tuple[int]): Size of input image. Default: (640, 640).\n        interval (int): The epoch interval of change image size. Default: 1.\n        device (torch.device | str): device for returned tensors.\n            Default: 'cuda'.\n    \"\"\"\n\n    def __init__(self,\n                 ratio_range=(14, 26),\n                 img_scale=(640, 640),\n                 interval=1,\n                 device='cuda'):\n        warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. '\n                      'Please use Resize pipeline to achieve similar '\n                      'functions. Due to the multi-process dataloader, '\n                      'its behavior is different from YOLOX\\'s official '\n                      'implementation, the official is to change the size '\n                      'every fixed iteration interval and what we achieved '\n                      'is a fixed epoch interval.')\n        self.rank, world_size = get_dist_info()\n        self.is_distributed = world_size > 1\n        self.ratio_range = ratio_range\n        self.img_scale = img_scale\n        self.interval = interval\n        self.device = device\n\n    def after_train_epoch(self, runner):\n        \"\"\"Change the dataset output image size.\"\"\"\n        if self.ratio_range is not None and (runner.epoch +\n                                             1) % self.interval == 0:\n            # Due to DDP and DP get the device behavior inconsistent,\n            # so we did not get the device from runner.model.\n            tensor = torch.LongTensor(2).to(self.device)\n\n            if self.rank == 0:\n                size_factor = self.img_scale[1] * 1. / self.img_scale[0]\n                size = random.randint(*self.ratio_range)\n                size = (int(32 * size), 32 * int(size * size_factor))\n                tensor[0] = size[0]\n                tensor[1] = size[1]\n\n            if self.is_distributed:\n                dist.barrier()\n                dist.broadcast(tensor, 0)\n\n            runner.data_loader.dataset.update_dynamic_scale(\n                (tensor[0].item(), tensor[1].item()))\n"
  },
  {
    "path": "mmdet/core/hook/wandblogger_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport importlib\nimport os.path as osp\nimport sys\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as mask_util\nfrom mmcv.runner import HOOKS\nfrom mmcv.runner.dist_utils import master_only\nfrom mmcv.runner.hooks.checkpoint import CheckpointHook\nfrom mmcv.runner.hooks.logger.wandb import WandbLoggerHook\nfrom mmcv.utils import digit_version\n\nfrom mmdet.core import DistEvalHook, EvalHook\nfrom mmdet.core.mask.structures import polygon_to_bitmap\n\n\n@HOOKS.register_module()\nclass MMDetWandbHook(WandbLoggerHook):\n    \"\"\"Enhanced Wandb logger hook for MMDetection.\n\n    Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not\n    only automatically log all the metrics but also log the following extra\n    information - saves model checkpoints as W&B Artifact, and\n    logs model prediction as interactive W&B Tables.\n\n    - Metrics: The MMDetWandbHook will automatically log training\n        and validation metrics along with system metrics (CPU/GPU).\n\n    - Checkpointing: If `log_checkpoint` is True, the checkpoint saved at\n        every checkpoint interval will be saved as W&B Artifacts.\n        This depends on the : class:`mmcv.runner.CheckpointHook` whose priority\n        is higher than this hook. Please refer to\n        https://docs.wandb.ai/guides/artifacts/model-versioning\n        to learn more about model versioning with W&B Artifacts.\n\n    - Checkpoint Metadata: If evaluation results are available for a given\n        checkpoint artifact, it will have a metadata associated with it.\n        The metadata contains the evaluation metrics computed on validation\n        data with that checkpoint along with the current epoch. It depends\n        on `EvalHook` whose priority is more than MMDetWandbHook.\n\n    - Evaluation: At every evaluation interval, the `MMDetWandbHook` logs the\n        model prediction as interactive W&B Tables. The number of samples\n        logged is given by `num_eval_images`. Currently, the `MMDetWandbHook`\n        logs the predicted bounding boxes along with the ground truth at every\n        evaluation interval. This depends on the `EvalHook` whose priority is\n        more than `MMDetWandbHook`. Also note that the data is just logged once\n        and subsequent evaluation tables uses reference to the logged data\n        to save memory usage. Please refer to\n        https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables.\n\n    For more details check out W&B's MMDetection docs:\n    https://docs.wandb.ai/guides/integrations/mmdetection\n\n    ```\n    Example:\n        log_config = dict(\n            ...\n            hooks=[\n                ...,\n                dict(type='MMDetWandbHook',\n                     init_kwargs={\n                         'entity': \"YOUR_ENTITY\",\n                         'project': \"YOUR_PROJECT_NAME\"\n                     },\n                     interval=50,\n                     log_checkpoint=True,\n                     log_checkpoint_metadata=True,\n                     num_eval_images=100,\n                     bbox_score_thr=0.3)\n            ])\n    ```\n\n    Args:\n        init_kwargs (dict): A dict passed to wandb.init to initialize\n            a W&B run. Please refer to https://docs.wandb.ai/ref/python/init\n            for possible key-value pairs.\n        interval (int): Logging interval (every k iterations). 
Defaults to 50.\n        log_checkpoint (bool): Save the checkpoint at every checkpoint interval\n            as W&B Artifacts. Use this for model versioning where each version\n            is a checkpoint. Defaults to False.\n        log_checkpoint_metadata (bool): Log the evaluation metrics computed\n            on the validation data with the checkpoint, along with current\n            epoch as a metadata to that checkpoint.\n            Defaults to True.\n        num_eval_images (int): The number of validation images to be logged.\n            If zero, the evaluation won't be logged. Defaults to 100.\n        bbox_score_thr (float): Threshold for bounding box scores.\n            Defaults to 0.3.\n    \"\"\"\n\n    def __init__(self,\n                 init_kwargs=None,\n                 interval=50,\n                 log_checkpoint=False,\n                 log_checkpoint_metadata=False,\n                 num_eval_images=100,\n                 bbox_score_thr=0.3,\n                 **kwargs):\n        super(MMDetWandbHook, self).__init__(init_kwargs, interval, **kwargs)\n\n        self.log_checkpoint = log_checkpoint\n        self.log_checkpoint_metadata = (\n            log_checkpoint and log_checkpoint_metadata)\n        self.num_eval_images = num_eval_images\n        self.bbox_score_thr = bbox_score_thr\n        self.log_evaluation = (num_eval_images > 0)\n        self.ckpt_hook: CheckpointHook = None\n        self.eval_hook: EvalHook = None\n\n    def import_wandb(self):\n        try:\n            import wandb\n            from wandb import init  # noqa\n\n            # Fix ResourceWarning when calling wandb.log in wandb v0.12.10.\n            # https://github.com/wandb/client/issues/2837\n            if digit_version(wandb.__version__) < digit_version('0.12.10'):\n                warnings.warn(\n                    f'The current wandb {wandb.__version__} is '\n                    f'lower than v0.12.10 will cause ResourceWarning '\n                    f'when calling wandb.log, Please run '\n                    f'\"pip install --upgrade wandb\"')\n\n        except ImportError:\n            raise ImportError(\n                'Please run \"pip install \"wandb>=0.12.10\"\" to install wandb')\n        self.wandb = wandb\n\n    @master_only\n    def before_run(self, runner):\n        super(MMDetWandbHook, self).before_run(runner)\n\n        # Save and Log config.\n        if runner.meta is not None and runner.meta.get('exp_name',\n                                                       None) is not None:\n            src_cfg_path = osp.join(runner.work_dir,\n                                    runner.meta.get('exp_name', None))\n            if osp.exists(src_cfg_path):\n                self.wandb.save(src_cfg_path, base_path=runner.work_dir)\n                self._update_wandb_config(runner)\n        else:\n            runner.logger.warning('No meta information found in the runner. 
')\n\n        # Inspect CheckpointHook and EvalHook\n        for hook in runner.hooks:\n            if isinstance(hook, CheckpointHook):\n                self.ckpt_hook = hook\n            if isinstance(hook, (EvalHook, DistEvalHook)):\n                self.eval_hook = hook\n\n        # Check conditions to log checkpoint\n        if self.log_checkpoint:\n            if self.ckpt_hook is None:\n                self.log_checkpoint = False\n                self.log_checkpoint_metadata = False\n                runner.logger.warning(\n                    'To log checkpoint in MMDetWandbHook, `CheckpointHook` is'\n                    'required, please check hooks in the runner.')\n            else:\n                self.ckpt_interval = self.ckpt_hook.interval\n\n        # Check conditions to log evaluation\n        if self.log_evaluation or self.log_checkpoint_metadata:\n            if self.eval_hook is None:\n                self.log_evaluation = False\n                self.log_checkpoint_metadata = False\n                runner.logger.warning(\n                    'To log evaluation or checkpoint metadata in '\n                    'MMDetWandbHook, `EvalHook` or `DistEvalHook` in mmdet '\n                    'is required, please check whether the validation '\n                    'is enabled.')\n            else:\n                self.eval_interval = self.eval_hook.interval\n                self.val_dataset = self.eval_hook.dataloader.dataset\n                # Determine the number of samples to be logged.\n                if self.num_eval_images > len(self.val_dataset):\n                    self.num_eval_images = len(self.val_dataset)\n                    runner.logger.warning(\n                        f'The num_eval_images ({self.num_eval_images}) is '\n                        'greater than the total number of validation samples '\n                        f'({len(self.val_dataset)}). 
The complete validation '\n                        'dataset will be logged.')\n\n        # Check conditions to log checkpoint metadata\n        if self.log_checkpoint_metadata:\n            assert self.ckpt_interval % self.eval_interval == 0, \\\n                'To log checkpoint metadata in MMDetWandbHook, the interval ' \\\n                f'of checkpoint saving ({self.ckpt_interval}) should be ' \\\n                'divisible by the interval of evaluation ' \\\n                f'({self.eval_interval}).'\n\n        # Initialize evaluation table\n        if self.log_evaluation:\n            # Initialize data table\n            self._init_data_table()\n            # Add data to the data table\n            self._add_ground_truth(runner)\n            # Log ground truth data\n            self._log_data_table()\n\n    @master_only\n    def after_train_epoch(self, runner):\n        super(MMDetWandbHook, self).after_train_epoch(runner)\n\n        if not self.by_epoch:\n            return\n\n        # Log checkpoint and metadata.\n        if (self.log_checkpoint\n                and self.every_n_epochs(runner, self.ckpt_interval)\n                or (self.ckpt_hook.save_last and self.is_last_epoch(runner))):\n            if self.log_checkpoint_metadata and self.eval_hook:\n                metadata = {\n                    'epoch': runner.epoch + 1,\n                    **self._get_eval_results()\n                }\n            else:\n                metadata = None\n            aliases = [f'epoch_{runner.epoch + 1}', 'latest']\n            model_path = osp.join(self.ckpt_hook.out_dir,\n                                  f'epoch_{runner.epoch + 1}.pth')\n            self._log_ckpt_as_artifact(model_path, aliases, metadata)\n\n        # Save prediction table\n        if self.log_evaluation and self.eval_hook._should_evaluate(runner):\n            results = self.eval_hook.latest_results\n            # Initialize evaluation table\n            self._init_pred_table()\n            # Log predictions\n            self._log_predictions(results)\n            # Log the table\n            self._log_eval_table(runner.epoch + 1)\n\n    # for the reason of this double-layered structure, refer to\n    # https://github.com/open-mmlab/mmdetection/issues/8145#issuecomment-1345343076\n    def after_train_iter(self, runner):\n        if self.get_mode(runner) == 'train':\n            # An ugly patch. 
The iter-based eval hook will call the\n            # `after_train_iter` method of all logger hooks before evaluation.\n            # Use this trick to skip that call.\n            # Don't call super method at first, it will clear the log_buffer\n            return super(MMDetWandbHook, self).after_train_iter(runner)\n        else:\n            super(MMDetWandbHook, self).after_train_iter(runner)\n        self._after_train_iter(runner)\n\n    @master_only\n    def _after_train_iter(self, runner):\n        if self.by_epoch:\n            return\n\n        # Save checkpoint and metadata\n        if (self.log_checkpoint\n                and self.every_n_iters(runner, self.ckpt_interval)\n                or (self.ckpt_hook.save_last and self.is_last_iter(runner))):\n            if self.log_checkpoint_metadata and self.eval_hook:\n                metadata = {\n                    'iter': runner.iter + 1,\n                    **self._get_eval_results()\n                }\n            else:\n                metadata = None\n            aliases = [f'iter_{runner.iter + 1}', 'latest']\n            model_path = osp.join(self.ckpt_hook.out_dir,\n                                  f'iter_{runner.iter + 1}.pth')\n            self._log_ckpt_as_artifact(model_path, aliases, metadata)\n\n        # Save prediction table\n        if self.log_evaluation and self.eval_hook._should_evaluate(runner):\n            results = self.eval_hook.latest_results\n            # Initialize evaluation table\n            self._init_pred_table()\n            # Log predictions\n            self._log_predictions(results)\n            # Log the table\n            self._log_eval_table(runner.iter + 1)\n\n    @master_only\n    def after_run(self, runner):\n        self.wandb.finish()\n\n    def _update_wandb_config(self, runner):\n        \"\"\"Update wandb config.\"\"\"\n        # Import the config file.\n        sys.path.append(runner.work_dir)\n        config_filename = runner.meta['exp_name'][:-3]\n        configs = importlib.import_module(config_filename)\n        # Prepare a nested dict of config variables.\n        config_keys = [key for key in dir(configs) if not key.startswith('__')]\n        config_dict = {key: getattr(configs, key) for key in config_keys}\n        # Update the W&B config.\n        self.wandb.config.update(config_dict)\n\n    def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None):\n        \"\"\"Log model checkpoint as  W&B Artifact.\n\n        Args:\n            model_path (str): Path of the checkpoint to log.\n            aliases (list): List of the aliases associated with this artifact.\n            metadata (dict, optional): Metadata associated with this artifact.\n        \"\"\"\n        model_artifact = self.wandb.Artifact(\n            f'run_{self.wandb.run.id}_model', type='model', metadata=metadata)\n        model_artifact.add_file(model_path)\n        self.wandb.log_artifact(model_artifact, aliases=aliases)\n\n    def _get_eval_results(self):\n        \"\"\"Get model evaluation results.\"\"\"\n        results = self.eval_hook.latest_results\n        eval_results = self.val_dataset.evaluate(\n            results, logger='silent', **self.eval_hook.eval_kwargs)\n        return eval_results\n\n    def _init_data_table(self):\n        \"\"\"Initialize the W&B Tables for validation data.\"\"\"\n        columns = ['image_name', 'image']\n        self.data_table = self.wandb.Table(columns=columns)\n\n    def _init_pred_table(self):\n        \"\"\"Initialize the W&B Tables for model 
evaluation.\"\"\"\n        columns = ['image_name', 'ground_truth', 'prediction']\n        self.eval_table = self.wandb.Table(columns=columns)\n\n    def _add_ground_truth(self, runner):\n        # Get image loading pipeline\n        from mmdet.datasets.pipelines import LoadImageFromFile\n        img_loader = None\n        for t in self.val_dataset.pipeline.transforms:\n            if isinstance(t, LoadImageFromFile):\n                img_loader = t\n\n        if img_loader is None:\n            self.log_evaluation = False\n            runner.logger.warning(\n                'LoadImageFromFile is required to add images '\n                'to W&B Tables.')\n            return\n\n        # Select the images to be logged.\n        self.eval_image_indexs = np.arange(len(self.val_dataset))\n        # Set seed so that same validation set is logged each time.\n        np.random.seed(42)\n        np.random.shuffle(self.eval_image_indexs)\n        self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images]\n\n        CLASSES = self.val_dataset.CLASSES\n        self.class_id_to_label = {\n            id + 1: name\n            for id, name in enumerate(CLASSES)\n        }\n        self.class_set = self.wandb.Classes([{\n            'id': id,\n            'name': name\n        } for id, name in self.class_id_to_label.items()])\n\n        img_prefix = self.val_dataset.img_prefix\n\n        for idx in self.eval_image_indexs:\n            img_info = self.val_dataset.data_infos[idx]\n            image_name = img_info.get('filename', f'img_{idx}')\n            img_height, img_width = img_info['height'], img_info['width']\n\n            img_meta = img_loader(\n                dict(img_info=img_info, img_prefix=img_prefix))\n\n            # Get image and convert from BGR to RGB\n            image = mmcv.bgr2rgb(img_meta['img'])\n\n            data_ann = self.val_dataset.get_ann_info(idx)\n            bboxes = data_ann['bboxes']\n            labels = data_ann['labels']\n            masks = data_ann.get('masks', None)\n\n            # Get dict of bounding boxes to be logged.\n            assert len(bboxes) == len(labels)\n            wandb_boxes = self._get_wandb_bboxes(bboxes, labels)\n\n            # Get dict of masks to be logged.\n            if masks is not None:\n                wandb_masks = self._get_wandb_masks(\n                    masks,\n                    labels,\n                    is_poly_mask=True,\n                    height=img_height,\n                    width=img_width)\n            else:\n                wandb_masks = None\n            # TODO: Panoramic segmentation visualization.\n\n            # Log a row to the data table.\n            self.data_table.add_data(\n                image_name,\n                self.wandb.Image(\n                    image,\n                    boxes=wandb_boxes,\n                    masks=wandb_masks,\n                    classes=self.class_set))\n\n    def _log_predictions(self, results):\n        table_idxs = self.data_table_ref.get_index()\n        assert len(table_idxs) == len(self.eval_image_indexs)\n\n        for ndx, eval_image_index in enumerate(self.eval_image_indexs):\n            # Get the result\n            result = results[eval_image_index]\n            if isinstance(result, tuple):\n                bbox_result, segm_result = result\n                if isinstance(segm_result, tuple):\n                    segm_result = segm_result[0]  # ms rcnn\n            else:\n                bbox_result, segm_result = result, None\n            
assert len(bbox_result) == len(self.class_id_to_label)\n\n            # Get labels\n            bboxes = np.vstack(bbox_result)\n            labels = [\n                np.full(bbox.shape[0], i, dtype=np.int32)\n                for i, bbox in enumerate(bbox_result)\n            ]\n            labels = np.concatenate(labels)\n\n            # Get segmentation mask if available.\n            segms = None\n            if segm_result is not None and len(labels) > 0:\n                segms = mmcv.concat_list(segm_result)\n                segms = mask_util.decode(segms)\n                segms = segms.transpose(2, 0, 1)\n                assert len(segms) == len(labels)\n            # TODO: Panoramic segmentation visualization.\n\n            # Remove bounding boxes and masks with score lower than threshold.\n            if self.bbox_score_thr > 0:\n                assert bboxes is not None and bboxes.shape[1] == 5\n                scores = bboxes[:, -1]\n                inds = scores > self.bbox_score_thr\n                bboxes = bboxes[inds, :]\n                labels = labels[inds]\n                if segms is not None:\n                    segms = segms[inds, ...]\n\n            # Get dict of bounding boxes to be logged.\n            wandb_boxes = self._get_wandb_bboxes(bboxes, labels, log_gt=False)\n            # Get dict of masks to be logged.\n            if segms is not None:\n                wandb_masks = self._get_wandb_masks(segms, labels)\n            else:\n                wandb_masks = None\n\n            # Log a row to the eval table.\n            self.eval_table.add_data(\n                self.data_table_ref.data[ndx][0],\n                self.data_table_ref.data[ndx][1],\n                self.wandb.Image(\n                    self.data_table_ref.data[ndx][1],\n                    boxes=wandb_boxes,\n                    masks=wandb_masks,\n                    classes=self.class_set))\n\n    def _get_wandb_bboxes(self, bboxes, labels, log_gt=True):\n        \"\"\"Get list of structured dict for logging bounding boxes to W&B.\n\n        Args:\n            bboxes (list): List of bounding box coordinates in\n                        (minX, minY, maxX, maxY) format.\n            labels (int): List of label ids.\n            log_gt (bool): Whether to log ground truth or prediction boxes.\n\n        Returns:\n            Dictionary of bounding boxes to be logged.\n        \"\"\"\n        wandb_boxes = {}\n\n        box_data = []\n        for bbox, label in zip(bboxes, labels):\n            if not isinstance(label, int):\n                label = int(label)\n            label = label + 1\n\n            if len(bbox) == 5:\n                confidence = float(bbox[4])\n                class_name = self.class_id_to_label[label]\n                box_caption = f'{class_name} {confidence:.2f}'\n            else:\n                box_caption = str(self.class_id_to_label[label])\n\n            position = dict(\n                minX=int(bbox[0]),\n                minY=int(bbox[1]),\n                maxX=int(bbox[2]),\n                maxY=int(bbox[3]))\n\n            box_data.append({\n                'position': position,\n                'class_id': label,\n                'box_caption': box_caption,\n                'domain': 'pixel'\n            })\n\n        wandb_bbox_dict = {\n            'box_data': box_data,\n            'class_labels': self.class_id_to_label\n        }\n\n        if log_gt:\n            wandb_boxes['ground_truth'] = wandb_bbox_dict\n        else:\n            
wandb_boxes['predictions'] = wandb_bbox_dict\n\n        return wandb_boxes\n\n    def _get_wandb_masks(self,\n                         masks,\n                         labels,\n                         is_poly_mask=False,\n                         height=None,\n                         width=None):\n        \"\"\"Get list of structured dict for logging masks to W&B.\n\n        Args:\n            masks (list): List of masks.\n            labels (int): List of label ids.\n            is_poly_mask (bool): Whether the mask is polygonal or not.\n                This is true for CocoDataset.\n            height (int): Height of the image.\n            width (int): Width of the image.\n\n        Returns:\n            Dictionary of masks to be logged.\n        \"\"\"\n        mask_label_dict = dict()\n        for mask, label in zip(masks, labels):\n            label = label + 1\n            # Get bitmap mask from polygon.\n            if is_poly_mask:\n                if height is not None and width is not None:\n                    mask = polygon_to_bitmap(mask, height, width)\n            # Create composite masks for each class.\n            if label not in mask_label_dict.keys():\n                mask_label_dict[label] = mask\n            else:\n                mask_label_dict[label] = np.logical_or(mask_label_dict[label],\n                                                       mask)\n\n        wandb_masks = dict()\n        for key, value in mask_label_dict.items():\n            # Create mask for that class.\n            value = value.astype(np.uint8)\n            value[value > 0] = key\n\n            # Create dict of masks for logging.\n            class_name = self.class_id_to_label[key]\n            wandb_masks[class_name] = {\n                'mask_data': value,\n                'class_labels': self.class_id_to_label\n            }\n\n        return wandb_masks\n\n    def _log_data_table(self):\n        \"\"\"Log the W&B Tables for validation data as artifact and calls\n        `use_artifact` on it so that the evaluation table can use the reference\n        of already uploaded images.\n\n        This allows the data to be uploaded just once.\n        \"\"\"\n        data_artifact = self.wandb.Artifact('val', type='dataset')\n        data_artifact.add(self.data_table, 'val_data')\n\n        if not self.wandb.run.offline:\n            self.wandb.run.use_artifact(data_artifact)\n            data_artifact.wait()\n            self.data_table_ref = data_artifact.get('val_data')\n        else:\n            self.data_table_ref = self.data_table\n\n    def _log_eval_table(self, idx):\n        \"\"\"Log the W&B Tables for model evaluation.\n\n        The table will be logged multiple times creating new version. Use this\n        to compare models at different intervals interactively.\n        \"\"\"\n        pred_artifact = self.wandb.Artifact(\n            f'run_{self.wandb.run.id}_pred', type='evaluation')\n        pred_artifact.add(self.eval_table, 'eval_data')\n        if self.by_epoch:\n            aliases = ['latest', f'epoch_{idx}']\n        else:\n            aliases = ['latest', f'iter_{idx}']\n        self.wandb.run.log_artifact(pred_artifact, aliases=aliases)\n"
  },
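MMDetWandbHook is configured entirely through its constructor arguments, so the clearest way to see them together is a config fragment. The sketch below assumes the usual MMDetection pattern of listing logger hooks under `log_config`; the W&B project/entity names and the interval values are placeholders, not values taken from this repository.

```python
# Illustrative config fragment (placeholder values, not a file in this repo).
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(
            type='MMDetWandbHook',
            init_kwargs=dict(project='my-project', entity='my-team'),  # forwarded to wandb.init
            interval=50,                   # metric logging interval (iterations)
            log_checkpoint=True,           # upload checkpoints as W&B Artifacts
            log_checkpoint_metadata=True,  # only effective together with log_checkpoint
            num_eval_images=100,           # validation images logged to W&B Tables (0 disables)
            bbox_score_thr=0.3)            # drop predicted boxes below this score
    ])
```

Note that checkpoint metadata logging additionally requires the checkpoint-saving interval to be divisible by the evaluation interval, as enforced by the assertion in `before_run`.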
  {
    "path": "mmdet/core/hook/yolox_lrupdater_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.runner.hooks import HOOKS\nfrom mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook,\n                                          annealing_cos)\n\n\n@HOOKS.register_module()\nclass YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):\n    \"\"\"YOLOX learning rate scheme.\n\n    There are two main differences between YOLOXLrUpdaterHook\n    and CosineAnnealingLrUpdaterHook.\n\n       1. When the current running epoch is greater than\n           `max_epoch-last_epoch`, a fixed learning rate will be used\n       2. The exp warmup scheme is different with LrUpdaterHook in MMCV\n\n    Args:\n        num_last_epochs (int): The number of epochs with a fixed learning rate\n           before the end of the training.\n    \"\"\"\n\n    def __init__(self, num_last_epochs, **kwargs):\n        self.num_last_epochs = num_last_epochs\n        super(YOLOXLrUpdaterHook, self).__init__(**kwargs)\n\n    def get_warmup_lr(self, cur_iters):\n\n        def _get_warmup_lr(cur_iters, regular_lr):\n            # exp warmup scheme\n            k = self.warmup_ratio * pow(\n                (cur_iters + 1) / float(self.warmup_iters), 2)\n            warmup_lr = [_lr * k for _lr in regular_lr]\n            return warmup_lr\n\n        if isinstance(self.base_lr, dict):\n            lr_groups = {}\n            for key, base_lr in self.base_lr.items():\n                lr_groups[key] = _get_warmup_lr(cur_iters, base_lr)\n            return lr_groups\n        else:\n            return _get_warmup_lr(cur_iters, self.base_lr)\n\n    def get_lr(self, runner, base_lr):\n        last_iter = len(runner.data_loader) * self.num_last_epochs\n\n        if self.by_epoch:\n            progress = runner.epoch\n            max_progress = runner.max_epochs\n        else:\n            progress = runner.iter\n            max_progress = runner.max_iters\n\n        progress += 1\n\n        if self.min_lr_ratio is not None:\n            target_lr = base_lr * self.min_lr_ratio\n        else:\n            target_lr = self.min_lr\n\n        if progress >= max_progress - last_iter:\n            # fixed learning rate\n            return target_lr\n        else:\n            return annealing_cos(\n                base_lr, target_lr, (progress - self.warmup_iters) /\n                (max_progress - self.warmup_iters - last_iter))\n"
  },
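The two differences called out in the docstring (the quadratic "exp" warmup and the fixed learning rate over the final `num_last_epochs` epochs) are easier to see written as a single function of training progress. The sketch below re-derives the schedule for one base learning rate with iteration-based progress (`by_epoch=False`); it merges `get_warmup_lr` and `get_lr` for illustration only and is not part of the hook's API.

```python
import math

def yolox_lr(progress, *, base_lr, max_iters, warmup_iters, warmup_ratio,
             iters_per_epoch, num_last_epochs, min_lr_ratio):
    """Illustrative sketch of the schedule implemented by YOLOXLrUpdaterHook."""
    last_iters = iters_per_epoch * num_last_epochs
    target_lr = base_lr * min_lr_ratio
    if progress < warmup_iters:
        # "exp" warmup: quadratic ramp towards base_lr * warmup_ratio.
        k = warmup_ratio * ((progress + 1) / float(warmup_iters)) ** 2
        return base_lr * k
    progress += 1
    if progress >= max_iters - last_iters:
        # Fixed learning rate for the last `num_last_epochs` epochs.
        return target_lr
    # Cosine annealing between the end of warmup and the fixed-LR phase
    # (expansion of mmcv's annealing_cos(base_lr, target_lr, frac)).
    frac = (progress - warmup_iters) / (max_iters - warmup_iters - last_iters)
    return target_lr + 0.5 * (base_lr - target_lr) * (1 + math.cos(math.pi * frac))
```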
  {
    "path": "mmdet/core/hook/yolox_mode_switch_hook.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\n@HOOKS.register_module()\nclass YOLOXModeSwitchHook(Hook):\n    \"\"\"Switch the mode of YOLOX during training.\n\n    This hook turns off the mosaic and mixup data augmentation and switches\n    to use L1 loss in bbox_head.\n\n    Args:\n        num_last_epochs (int): The number of latter epochs in the end of the\n            training to close the data augmentation and switch to L1 loss.\n            Default: 15.\n       skip_type_keys (list[str], optional): Sequence of type string to be\n            skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp')\n    \"\"\"\n\n    def __init__(self,\n                 num_last_epochs=15,\n                 skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')):\n        self.num_last_epochs = num_last_epochs\n        self.skip_type_keys = skip_type_keys\n        self._restart_dataloader = False\n\n    def before_train_epoch(self, runner):\n        \"\"\"Close mosaic and mixup augmentation and switches to use L1 loss.\"\"\"\n        epoch = runner.epoch\n        train_loader = runner.data_loader\n        model = runner.model\n        if is_module_wrapper(model):\n            model = model.module\n        if (epoch + 1) == runner.max_epochs - self.num_last_epochs:\n            runner.logger.info('No mosaic and mixup aug now!')\n            # The dataset pipeline cannot be updated when persistent_workers\n            # is True, so we need to force the dataloader's multi-process\n            # restart. This is a very hacky approach.\n            train_loader.dataset.update_skip_type_keys(self.skip_type_keys)\n            if hasattr(train_loader, 'persistent_workers'\n                       ) and train_loader.persistent_workers is True:\n                train_loader._DataLoader__initialized = False\n                train_loader._iterator = None\n                self._restart_dataloader = True\n            runner.logger.info('Add additional L1 loss now!')\n            model.bbox_head.use_l1 = True\n        else:\n            # Once the restart is complete, we need to restore\n            # the initialization flag.\n            if self._restart_dataloader:\n                train_loader._DataLoader__initialized = True\n"
  },
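Because the hook mutates both the data pipeline (via `update_skip_type_keys`) and the model (`bbox_head.use_l1`), it only makes sense when the training dataset object actually implements `update_skip_type_keys`. A typical way to enable it is through `custom_hooks`; the fragment below is an illustrative sketch with placeholder values.

```python
# Illustrative config fragment (placeholder values, not a file in this repo).
# The training dataset must implement `update_skip_type_keys`, because
# before_train_epoch calls it directly on `runner.data_loader.dataset`.
custom_hooks = [
    dict(
        type='YOLOXModeSwitchHook',
        num_last_epochs=15,  # turn off heavy augmentation for the last 15 epochs
        skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp'))
]
```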
  {
    "path": "mmdet/core/mask/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .mask_target import mask_target\nfrom .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks\nfrom .utils import encode_mask_results, mask2bbox, split_combined_polys\n\n__all__ = [\n    'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks',\n    'PolygonMasks', 'encode_mask_results', 'mask2bbox'\n]\n"
  },
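The package exposes both mask representations defined in `structures.py` below. As a quick orientation, the sketch here converts a polygon mask to a bitmap and compares the two area computations (shoelace formula vs. pixel counting); it mirrors the example in the `PolygonMasks` docstring and assumes `pycocotools` is installed, as the library itself requires.

```python
import numpy as np

from mmdet.core.mask import BitmapMasks, PolygonMasks

# One object described by a single polygon: a 10x10 axis-aligned square
# on a 16x16 canvas (same shape as the PolygonMasks docstring example).
polys = [[np.array([0, 0, 10, 0, 10, 10, 0, 10], dtype=np.float32)]]
poly_masks = PolygonMasks(polys, height=16, width=16)
print(poly_masks.areas)           # shoelace area of each instance -> [100.]

# Rasterize to the bitmap representation and recompute from pixels.
bitmap_masks = poly_masks.to_bitmap()
assert isinstance(bitmap_masks, BitmapMasks)
print(bitmap_masks.areas)         # per-instance pixel count, close to 100
print(bitmap_masks.get_bboxes())  # tight boxes recovered from the bitmaps
```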
  {
    "path": "mmdet/core/mask/mask_target.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair\n\n\ndef mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list,\n                cfg):\n    \"\"\"Compute mask target for positive proposals in multiple images.\n\n    Args:\n        pos_proposals_list (list[Tensor]): Positive proposals in multiple\n            images.\n        pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each\n            positive proposals.\n        gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of\n            each image.\n        cfg (dict): Config dict that specifies the mask size.\n\n    Returns:\n        list[Tensor]: Mask target of each image.\n\n    Example:\n        >>> import mmcv\n        >>> import mmdet\n        >>> from mmdet.core.mask import BitmapMasks\n        >>> from mmdet.core.mask.mask_target import *\n        >>> H, W = 17, 18\n        >>> cfg = mmcv.Config({'mask_size': (13, 14)})\n        >>> rng = np.random.RandomState(0)\n        >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image\n        >>> pos_proposals_list = [\n        >>>     torch.Tensor([\n        >>>         [ 7.2425,  5.5929, 13.9414, 14.9541],\n        >>>         [ 7.3241,  3.6170, 16.3850, 15.3102],\n        >>>     ]),\n        >>>     torch.Tensor([\n        >>>         [ 4.8448, 6.4010, 7.0314, 9.7681],\n        >>>         [ 5.9790, 2.6989, 7.4416, 4.8580],\n        >>>         [ 0.0000, 0.0000, 0.1398, 9.8232],\n        >>>     ]),\n        >>> ]\n        >>> # Corresponding class index for each proposal for each image\n        >>> pos_assigned_gt_inds_list = [\n        >>>     torch.LongTensor([7, 0]),\n        >>>     torch.LongTensor([5, 4, 1]),\n        >>> ]\n        >>> # Ground truth mask for each true object for each image\n        >>> gt_masks_list = [\n        >>>     BitmapMasks(rng.rand(8, H, W), height=H, width=W),\n        >>>     BitmapMasks(rng.rand(6, H, W), height=H, width=W),\n        >>> ]\n        >>> mask_targets = mask_target(\n        >>>     pos_proposals_list, pos_assigned_gt_inds_list,\n        >>>     gt_masks_list, cfg)\n        >>> assert mask_targets.shape == (5,) + cfg['mask_size']\n    \"\"\"\n    cfg_list = [cfg for _ in range(len(pos_proposals_list))]\n    mask_targets = map(mask_target_single, pos_proposals_list,\n                       pos_assigned_gt_inds_list, gt_masks_list, cfg_list)\n    mask_targets = list(mask_targets)\n    if len(mask_targets) > 0:\n        mask_targets = torch.cat(mask_targets)\n    return mask_targets\n\n\ndef mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):\n    \"\"\"Compute mask target for each positive proposal in the image.\n\n    Args:\n        pos_proposals (Tensor): Positive proposals.\n        pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals.\n        gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap\n            or Polygon.\n        cfg (dict): Config dict that indicate the mask size.\n\n    Returns:\n        Tensor: Mask target of each positive proposals in the image.\n\n    Example:\n        >>> import mmcv\n        >>> import mmdet\n        >>> from mmdet.core.mask import BitmapMasks\n        >>> from mmdet.core.mask.mask_target import *  # NOQA\n        >>> H, W = 32, 32\n        >>> cfg = mmcv.Config({'mask_size': (7, 11)})\n        >>> rng = np.random.RandomState(0)\n        >>> # Masks for each ground truth box (relative to the 
image)\n        >>> gt_masks_data = rng.rand(3, H, W)\n        >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W)\n        >>> # Predicted positive boxes in one image\n        >>> pos_proposals = torch.FloatTensor([\n        >>>     [ 16.2,   5.5, 19.9, 20.9],\n        >>>     [ 17.3,  13.6, 19.3, 19.3],\n        >>>     [ 14.8,  16.4, 17.0, 23.7],\n        >>>     [  0.0,   0.0, 16.0, 16.0],\n        >>>     [  4.0,   0.0, 20.0, 16.0],\n        >>> ])\n        >>> # For each predicted proposal, its assignment to a gt mask\n        >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1])\n        >>> mask_targets = mask_target_single(\n        >>>     pos_proposals, pos_assigned_gt_inds, gt_masks, cfg)\n        >>> assert mask_targets.shape == (5,) + cfg['mask_size']\n    \"\"\"\n    device = pos_proposals.device\n    mask_size = _pair(cfg.mask_size)\n    binarize = not cfg.get('soft_mask_target', False)\n    num_pos = pos_proposals.size(0)\n    if num_pos > 0:\n        proposals_np = pos_proposals.cpu().numpy()\n        maxh, maxw = gt_masks.height, gt_masks.width\n        proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw)\n        proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh)\n        pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()\n\n        mask_targets = gt_masks.crop_and_resize(\n            proposals_np,\n            mask_size,\n            device=device,\n            inds=pos_assigned_gt_inds,\n            binarize=binarize).to_ndarray()\n\n        mask_targets = torch.from_numpy(mask_targets).float().to(device)\n    else:\n        mask_targets = pos_proposals.new_zeros((0, ) + mask_size)\n\n    return mask_targets\n"
  },
  {
    "path": "mmdet/core/mask/structures.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport cv2\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nimport torch\nfrom mmcv.ops.roi_align import roi_align\n\n\nclass BaseInstanceMasks(metaclass=ABCMeta):\n    \"\"\"Base class for instance masks.\"\"\"\n\n    @abstractmethod\n    def rescale(self, scale, interpolation='nearest'):\n        \"\"\"Rescale masks as large as possible while keeping the aspect ratio.\n        For details can refer to `mmcv.imrescale`.\n\n        Args:\n            scale (tuple[int]): The maximum size (h, w) of rescaled mask.\n            interpolation (str): Same as :func:`mmcv.imrescale`.\n\n        Returns:\n            BaseInstanceMasks: The rescaled masks.\n        \"\"\"\n\n    @abstractmethod\n    def resize(self, out_shape, interpolation='nearest'):\n        \"\"\"Resize masks to the given out_shape.\n\n        Args:\n            out_shape: Target (h, w) of resized mask.\n            interpolation (str): See :func:`mmcv.imresize`.\n\n        Returns:\n            BaseInstanceMasks: The resized masks.\n        \"\"\"\n\n    @abstractmethod\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"Flip masks alone the given direction.\n\n        Args:\n            flip_direction (str): Either 'horizontal' or 'vertical'.\n\n        Returns:\n            BaseInstanceMasks: The flipped masks.\n        \"\"\"\n\n    @abstractmethod\n    def pad(self, out_shape, pad_val):\n        \"\"\"Pad masks to the given size of (h, w).\n\n        Args:\n            out_shape (tuple[int]): Target (h, w) of padded mask.\n            pad_val (int): The padded value.\n\n        Returns:\n            BaseInstanceMasks: The padded masks.\n        \"\"\"\n\n    @abstractmethod\n    def crop(self, bbox):\n        \"\"\"Crop each mask by the given bbox.\n\n        Args:\n            bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ).\n\n        Return:\n            BaseInstanceMasks: The cropped masks.\n        \"\"\"\n\n    @abstractmethod\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device,\n                        interpolation='bilinear',\n                        binarize=True):\n        \"\"\"Crop and resize masks by the given bboxes.\n\n        This function is mainly used in mask targets computation.\n        It firstly align mask to bboxes by assigned_inds, then crop mask by the\n        assigned bbox and resize to the size of (mask_h, mask_w)\n\n        Args:\n            bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4)\n            out_shape (tuple[int]): Target (h, w) of resized mask\n            inds (ndarray): Indexes to assign masks to each bbox,\n                shape (N,) and values should be between [0, num_masks - 1].\n            device (str): Device of bboxes\n            interpolation (str): See `mmcv.imresize`\n            binarize (bool): if True fractional values are rounded to 0 or 1\n                after the resize operation. if False and unsupported an error\n                will be raised. 
Defaults to True.\n\n        Return:\n            BaseInstanceMasks: the cropped and resized masks.\n        \"\"\"\n\n    @abstractmethod\n    def expand(self, expanded_h, expanded_w, top, left):\n        \"\"\"see :class:`Expand`.\"\"\"\n\n    @property\n    @abstractmethod\n    def areas(self):\n        \"\"\"ndarray: areas of each instance.\"\"\"\n\n    @abstractmethod\n    def to_ndarray(self):\n        \"\"\"Convert masks to the format of ndarray.\n\n        Return:\n            ndarray: Converted masks in the format of ndarray.\n        \"\"\"\n\n    @abstractmethod\n    def to_tensor(self, dtype, device):\n        \"\"\"Convert masks to the format of Tensor.\n\n        Args:\n            dtype (str): Dtype of converted mask.\n            device (torch.device): Device of converted masks.\n\n        Returns:\n            Tensor: Converted masks in the format of Tensor.\n        \"\"\"\n\n    @abstractmethod\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  fill_val=0,\n                  interpolation='bilinear'):\n        \"\"\"Translate the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            offset (int | float): The offset for translate.\n            direction (str): The translate direction, either \"horizontal\"\n                or \"vertical\".\n            fill_val (int | float): Border value. Default 0.\n            interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n        Returns:\n            Translated masks.\n        \"\"\"\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"Shear the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            magnitude (int | float): The magnitude used for shear.\n            direction (str): The shear direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | tuple[int]): Value used in case of a\n                constant border. Default 0.\n            interpolation (str): Same as in :func:`mmcv.imshear`.\n\n        Returns:\n            ndarray: Sheared masks.\n        \"\"\"\n\n    @abstractmethod\n    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):\n        \"\"\"Rotate the masks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            angle (int | float): Rotation angle in degrees. Positive values\n                mean counter-clockwise rotation.\n            center (tuple[float], optional): Center point (w, h) of the\n                rotation in source image. If not specified, the center of\n                the image will be used.\n            scale (int | float): Isotropic scale factor.\n            fill_val (int | float): Border value. 
Default 0 for masks.\n\n        Returns:\n            Rotated masks.\n        \"\"\"\n\n\nclass BitmapMasks(BaseInstanceMasks):\n    \"\"\"This class represents masks in the form of bitmaps.\n\n    Args:\n        masks (ndarray): ndarray of masks in shape (N, H, W), where N is\n            the number of objects.\n        height (int): height of masks\n        width (int): width of masks\n\n    Example:\n        >>> from mmdet.core.mask.structures import *  # NOQA\n        >>> num_masks, H, W = 3, 32, 32\n        >>> rng = np.random.RandomState(0)\n        >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int)\n        >>> self = BitmapMasks(masks, height=H, width=W)\n\n        >>> # demo crop_and_resize\n        >>> num_boxes = 5\n        >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n        >>> out_shape = (14, 14)\n        >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n        >>> device = 'cpu'\n        >>> interpolation = 'bilinear'\n        >>> new = self.crop_and_resize(\n        ...     bboxes, out_shape, inds, device, interpolation)\n        >>> assert len(new) == num_boxes\n        >>> assert new.height, new.width == out_shape\n    \"\"\"\n\n    def __init__(self, masks, height, width):\n        self.height = height\n        self.width = width\n        if len(masks) == 0:\n            self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)\n        else:\n            assert isinstance(masks, (list, np.ndarray))\n            if isinstance(masks, list):\n                assert isinstance(masks[0], np.ndarray)\n                assert masks[0].ndim == 2  # (H, W)\n            else:\n                assert masks.ndim == 3  # (N, H, W)\n\n            self.masks = np.stack(masks).reshape(-1, height, width)\n            assert self.masks.shape[1] == self.height\n            assert self.masks.shape[2] == self.width\n\n    def __getitem__(self, index):\n        \"\"\"Index the BitmapMask.\n\n        Args:\n            index (int | ndarray): Indices in the format of integer or ndarray.\n\n        Returns:\n            :obj:`BitmapMasks`: Indexed bitmap masks.\n        \"\"\"\n        masks = self.masks[index].reshape(-1, self.height, self.width)\n        return BitmapMasks(masks, self.height, self.width)\n\n    def __iter__(self):\n        return iter(self.masks)\n\n    def __repr__(self):\n        s = self.__class__.__name__ + '('\n        s += f'num_masks={len(self.masks)}, '\n        s += f'height={self.height}, '\n        s += f'width={self.width})'\n        return s\n\n    def __len__(self):\n        \"\"\"Number of masks.\"\"\"\n        return len(self.masks)\n\n    def rescale(self, scale, interpolation='nearest'):\n        \"\"\"See :func:`BaseInstanceMasks.rescale`.\"\"\"\n        if len(self.masks) == 0:\n            new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n            rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)\n        else:\n            rescaled_masks = np.stack([\n                mmcv.imrescale(mask, scale, interpolation=interpolation)\n                for mask in self.masks\n            ])\n        height, width = rescaled_masks.shape[1:]\n        return BitmapMasks(rescaled_masks, height, width)\n\n    def resize(self, out_shape, interpolation='nearest'):\n        \"\"\"See :func:`BaseInstanceMasks.resize`.\"\"\"\n        if len(self.masks) == 0:\n            resized_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            resized_masks = np.stack([\n                
mmcv.imresize(\n                    mask, out_shape[::-1], interpolation=interpolation)\n                for mask in self.masks\n            ])\n        return BitmapMasks(resized_masks, *out_shape)\n\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"See :func:`BaseInstanceMasks.flip`.\"\"\"\n        assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n\n        if len(self.masks) == 0:\n            flipped_masks = self.masks\n        else:\n            flipped_masks = np.stack([\n                mmcv.imflip(mask, direction=flip_direction)\n                for mask in self.masks\n            ])\n        return BitmapMasks(flipped_masks, self.height, self.width)\n\n    def pad(self, out_shape, pad_val=0):\n        \"\"\"See :func:`BaseInstanceMasks.pad`.\"\"\"\n        if len(self.masks) == 0:\n            padded_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            padded_masks = np.stack([\n                mmcv.impad(mask, shape=out_shape, pad_val=pad_val)\n                for mask in self.masks\n            ])\n        return BitmapMasks(padded_masks, *out_shape)\n\n    def crop(self, bbox):\n        \"\"\"See :func:`BaseInstanceMasks.crop`.\"\"\"\n        assert isinstance(bbox, np.ndarray)\n        assert bbox.ndim == 1\n\n        # clip the boundary\n        bbox = bbox.copy()\n        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n        x1, y1, x2, y2 = bbox\n        w = np.maximum(x2 - x1, 1)\n        h = np.maximum(y2 - y1, 1)\n\n        if len(self.masks) == 0:\n            cropped_masks = np.empty((0, h, w), dtype=np.uint8)\n        else:\n            cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]\n        return BitmapMasks(cropped_masks, h, w)\n\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device='cpu',\n                        interpolation='bilinear',\n                        binarize=True):\n        \"\"\"See :func:`BaseInstanceMasks.crop_and_resize`.\"\"\"\n        if len(self.masks) == 0:\n            empty_masks = np.empty((0, *out_shape), dtype=np.uint8)\n            return BitmapMasks(empty_masks, *out_shape)\n\n        # convert bboxes to tensor\n        if isinstance(bboxes, np.ndarray):\n            bboxes = torch.from_numpy(bboxes).to(device=device)\n        if isinstance(inds, np.ndarray):\n            inds = torch.from_numpy(inds).to(device=device)\n\n        num_bbox = bboxes.shape[0]\n        fake_inds = torch.arange(\n            num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]\n        rois = torch.cat([fake_inds, bboxes], dim=1)  # Nx5\n        rois = rois.to(device=device)\n        if num_bbox > 0:\n            gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(\n                0, inds).to(dtype=rois.dtype)\n            targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,\n                                1.0, 0, 'avg', True).squeeze(1)\n            if binarize:\n                resized_masks = (targets >= 0.5).cpu().numpy()\n            else:\n                resized_masks = targets.cpu().numpy()\n        else:\n            resized_masks = []\n        return BitmapMasks(resized_masks, *out_shape)\n\n    def expand(self, expanded_h, expanded_w, top, left):\n        \"\"\"See :func:`BaseInstanceMasks.expand`.\"\"\"\n        if len(self.masks) == 0:\n            expanded_mask = np.empty((0, 
expanded_h, expanded_w),\n                                     dtype=np.uint8)\n        else:\n            expanded_mask = np.zeros((len(self), expanded_h, expanded_w),\n                                     dtype=np.uint8)\n            expanded_mask[:, top:top + self.height,\n                          left:left + self.width] = self.masks\n        return BitmapMasks(expanded_mask, expanded_h, expanded_w)\n\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  fill_val=0,\n                  interpolation='bilinear'):\n        \"\"\"Translate the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            offset (int | float): The offset for translate.\n            direction (str): The translate direction, either \"horizontal\"\n                or \"vertical\".\n            fill_val (int | float): Border value. Default 0 for masks.\n            interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n        Returns:\n            BitmapMasks: Translated BitmapMasks.\n\n        Example:\n            >>> from mmdet.core.mask.structures import BitmapMasks\n            >>> self = BitmapMasks.random(dtype=np.uint8)\n            >>> out_shape = (32, 32)\n            >>> offset = 4\n            >>> direction = 'horizontal'\n            >>> fill_val = 0\n            >>> interpolation = 'bilinear'\n            >>> # Note, There seem to be issues when:\n            >>> # * out_shape is different than self's shape\n            >>> # * the mask dtype is not supported by cv2.AffineWarp\n            >>> new = self.translate(out_shape, offset, direction, fill_val,\n            >>>                      interpolation)\n            >>> assert len(new) == len(self)\n            >>> assert new.height, new.width == out_shape\n        \"\"\"\n        if len(self.masks) == 0:\n            translated_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            translated_masks = mmcv.imtranslate(\n                self.masks.transpose((1, 2, 0)),\n                offset,\n                direction,\n                border_value=fill_val,\n                interpolation=interpolation)\n            if translated_masks.ndim == 2:\n                translated_masks = translated_masks[:, :, None]\n            translated_masks = translated_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(translated_masks, *out_shape)\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"Shear the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            magnitude (int | float): The magnitude used for shear.\n            direction (str): The shear direction, either \"horizontal\"\n                or \"vertical\".\n            border_value (int | tuple[int]): Value used in case of a\n                constant border.\n            interpolation (str): Same as in :func:`mmcv.imshear`.\n\n        Returns:\n            BitmapMasks: The sheared masks.\n        \"\"\"\n        if len(self.masks) == 0:\n            sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)\n        else:\n            sheared_masks = mmcv.imshear(\n                self.masks.transpose((1, 2, 0)),\n                magnitude,\n                direction,\n   
             border_value=border_value,\n                interpolation=interpolation)\n            if sheared_masks.ndim == 2:\n                sheared_masks = sheared_masks[:, :, None]\n            sheared_masks = sheared_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(sheared_masks, *out_shape)\n\n    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):\n        \"\"\"Rotate the BitmapMasks.\n\n        Args:\n            out_shape (tuple[int]): Shape for output mask, format (h, w).\n            angle (int | float): Rotation angle in degrees. Positive values\n                mean counter-clockwise rotation.\n            center (tuple[float], optional): Center point (w, h) of the\n                rotation in source image. If not specified, the center of\n                the image will be used.\n            scale (int | float): Isotropic scale factor.\n            fill_val (int | float): Border value. Default 0 for masks.\n\n        Returns:\n            BitmapMasks: Rotated BitmapMasks.\n        \"\"\"\n        if len(self.masks) == 0:\n            rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)\n        else:\n            rotated_masks = mmcv.imrotate(\n                self.masks.transpose((1, 2, 0)),\n                angle,\n                center=center,\n                scale=scale,\n                border_value=fill_val)\n            if rotated_masks.ndim == 2:\n                # case when only one mask, (h, w)\n                rotated_masks = rotated_masks[:, :, None]  # (h, w, 1)\n            rotated_masks = rotated_masks.transpose(\n                (2, 0, 1)).astype(self.masks.dtype)\n        return BitmapMasks(rotated_masks, *out_shape)\n\n    @property\n    def areas(self):\n        \"\"\"See :py:attr:`BaseInstanceMasks.areas`.\"\"\"\n        return self.masks.sum((1, 2))\n\n    def to_ndarray(self):\n        \"\"\"See :func:`BaseInstanceMasks.to_ndarray`.\"\"\"\n        return self.masks\n\n    def to_tensor(self, dtype, device):\n        \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n        return torch.tensor(self.masks, dtype=dtype, device=device)\n\n    @classmethod\n    def random(cls,\n               num_masks=3,\n               height=32,\n               width=32,\n               dtype=np.uint8,\n               rng=None):\n        \"\"\"Generate random bitmap masks for demo / testing purposes.\n\n        Example:\n            >>> from mmdet.core.mask.structures import BitmapMasks\n            >>> self = BitmapMasks.random()\n            >>> print('self = {}'.format(self))\n            self = BitmapMasks(num_masks=3, height=32, width=32)\n        \"\"\"\n        from mmdet.utils.util_random import ensure_rng\n        rng = ensure_rng(rng)\n        masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)\n        self = cls(masks, height=height, width=width)\n        return self\n\n    def get_bboxes(self):\n        num_masks = len(self)\n        boxes = np.zeros((num_masks, 4), dtype=np.float32)\n        x_any = self.masks.any(axis=1)\n        y_any = self.masks.any(axis=2)\n        for idx in range(num_masks):\n            x = np.where(x_any[idx, :])[0]\n            y = np.where(y_any[idx, :])[0]\n            if len(x) > 0 and len(y) > 0:\n                # use +1 for x_max and y_max so that the right and bottom\n                # boundary of instance masks are fully included by the box\n                boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1],\n       
                                  dtype=np.float32)\n        return boxes\n\n\nclass PolygonMasks(BaseInstanceMasks):\n    \"\"\"This class represents masks in the form of polygons.\n\n    Polygons is a list of three levels. The first level of the list\n    corresponds to objects, the second level to the polys that compose the\n    object, the third level to the poly coordinates\n\n    Args:\n        masks (list[list[ndarray]]): The first level of the list\n            corresponds to objects, the second level to the polys that\n            compose the object, the third level to the poly coordinates\n        height (int): height of masks\n        width (int): width of masks\n\n    Example:\n        >>> from mmdet.core.mask.structures import *  # NOQA\n        >>> masks = [\n        >>>     [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]\n        >>> ]\n        >>> height, width = 16, 16\n        >>> self = PolygonMasks(masks, height, width)\n\n        >>> # demo translate\n        >>> new = self.translate((16, 16), 4., direction='horizontal')\n        >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])\n        >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)\n\n        >>> # demo crop_and_resize\n        >>> num_boxes = 3\n        >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n        >>> out_shape = (16, 16)\n        >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n        >>> device = 'cpu'\n        >>> interpolation = 'bilinear'\n        >>> new = self.crop_and_resize(\n        ...     bboxes, out_shape, inds, device, interpolation)\n        >>> assert len(new) == num_boxes\n        >>> assert new.height, new.width == out_shape\n    \"\"\"\n\n    def __init__(self, masks, height, width):\n        assert isinstance(masks, list)\n        if len(masks) > 0:\n            assert isinstance(masks[0], list)\n            assert isinstance(masks[0][0], np.ndarray)\n\n        self.height = height\n        self.width = width\n        self.masks = masks\n\n    def __getitem__(self, index):\n        \"\"\"Index the polygon masks.\n\n        Args:\n            index (ndarray | List): The indices.\n\n        Returns:\n            :obj:`PolygonMasks`: The indexed polygon masks.\n        \"\"\"\n        if isinstance(index, np.ndarray):\n            index = index.tolist()\n        if isinstance(index, list):\n            masks = [self.masks[i] for i in index]\n        else:\n            try:\n                masks = self.masks[index]\n            except Exception:\n                raise ValueError(\n                    f'Unsupported input of type {type(index)} for indexing!')\n        if len(masks) and isinstance(masks[0], np.ndarray):\n            masks = [masks]  # ensure a list of three levels\n        return PolygonMasks(masks, self.height, self.width)\n\n    def __iter__(self):\n        return iter(self.masks)\n\n    def __repr__(self):\n        s = self.__class__.__name__ + '('\n        s += f'num_masks={len(self.masks)}, '\n        s += f'height={self.height}, '\n        s += f'width={self.width})'\n        return s\n\n    def __len__(self):\n        \"\"\"Number of masks.\"\"\"\n        return len(self.masks)\n\n    def rescale(self, scale, interpolation=None):\n        \"\"\"see :func:`BaseInstanceMasks.rescale`\"\"\"\n        new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n        if len(self.masks) == 0:\n            rescaled_masks = PolygonMasks([], new_h, new_w)\n        else:\n            rescaled_masks = 
self.resize((new_h, new_w))\n        return rescaled_masks\n\n    def resize(self, out_shape, interpolation=None):\n        \"\"\"see :func:`BaseInstanceMasks.resize`\"\"\"\n        if len(self.masks) == 0:\n            resized_masks = PolygonMasks([], *out_shape)\n        else:\n            h_scale = out_shape[0] / self.height\n            w_scale = out_shape[1] / self.width\n            resized_masks = []\n            for poly_per_obj in self.masks:\n                resized_poly = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    p[0::2] = p[0::2] * w_scale\n                    p[1::2] = p[1::2] * h_scale\n                    resized_poly.append(p)\n                resized_masks.append(resized_poly)\n            resized_masks = PolygonMasks(resized_masks, *out_shape)\n        return resized_masks\n\n    def flip(self, flip_direction='horizontal'):\n        \"\"\"see :func:`BaseInstanceMasks.flip`\"\"\"\n        assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n        if len(self.masks) == 0:\n            flipped_masks = PolygonMasks([], self.height, self.width)\n        else:\n            flipped_masks = []\n            for poly_per_obj in self.masks:\n                flipped_poly_per_obj = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    if flip_direction == 'horizontal':\n                        p[0::2] = self.width - p[0::2]\n                    elif flip_direction == 'vertical':\n                        p[1::2] = self.height - p[1::2]\n                    else:\n                        p[0::2] = self.width - p[0::2]\n                        p[1::2] = self.height - p[1::2]\n                    flipped_poly_per_obj.append(p)\n                flipped_masks.append(flipped_poly_per_obj)\n            flipped_masks = PolygonMasks(flipped_masks, self.height,\n                                         self.width)\n        return flipped_masks\n\n    def crop(self, bbox):\n        \"\"\"see :func:`BaseInstanceMasks.crop`\"\"\"\n        assert isinstance(bbox, np.ndarray)\n        assert bbox.ndim == 1\n\n        # clip the boundary\n        bbox = bbox.copy()\n        bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n        bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n        x1, y1, x2, y2 = bbox\n        w = np.maximum(x2 - x1, 1)\n        h = np.maximum(y2 - y1, 1)\n\n        if len(self.masks) == 0:\n            cropped_masks = PolygonMasks([], h, w)\n        else:\n            cropped_masks = []\n            for poly_per_obj in self.masks:\n                cropped_poly_per_obj = []\n                for p in poly_per_obj:\n                    # pycocotools will clip the boundary\n                    p = p.copy()\n                    p[0::2] = p[0::2] - bbox[0]\n                    p[1::2] = p[1::2] - bbox[1]\n                    cropped_poly_per_obj.append(p)\n                cropped_masks.append(cropped_poly_per_obj)\n            cropped_masks = PolygonMasks(cropped_masks, h, w)\n        return cropped_masks\n\n    def pad(self, out_shape, pad_val=0):\n        \"\"\"padding has no effect on polygons`\"\"\"\n        return PolygonMasks(self.masks, *out_shape)\n\n    def expand(self, *args, **kwargs):\n        \"\"\"TODO: Add expand for polygon\"\"\"\n        raise NotImplementedError\n\n    def crop_and_resize(self,\n                        bboxes,\n                        out_shape,\n                        inds,\n                        device='cpu',\n                      
  interpolation='bilinear',\n                        binarize=True):\n        \"\"\"see :func:`BaseInstanceMasks.crop_and_resize`\"\"\"\n        out_h, out_w = out_shape\n        if len(self.masks) == 0:\n            return PolygonMasks([], out_h, out_w)\n\n        if not binarize:\n            raise ValueError('Polygons are always binary, '\n                             'setting binarize=False is unsupported')\n\n        resized_masks = []\n        for i in range(len(bboxes)):\n            mask = self.masks[inds[i]]\n            bbox = bboxes[i, :]\n            x1, y1, x2, y2 = bbox\n            w = np.maximum(x2 - x1, 1)\n            h = np.maximum(y2 - y1, 1)\n            h_scale = out_h / max(h, 0.1)  # avoid too large scale\n            w_scale = out_w / max(w, 0.1)\n\n            resized_mask = []\n            for p in mask:\n                p = p.copy()\n                # crop\n                # pycocotools will clip the boundary\n                p[0::2] = p[0::2] - bbox[0]\n                p[1::2] = p[1::2] - bbox[1]\n\n                # resize\n                p[0::2] = p[0::2] * w_scale\n                p[1::2] = p[1::2] * h_scale\n                resized_mask.append(p)\n            resized_masks.append(resized_mask)\n        return PolygonMasks(resized_masks, *out_shape)\n\n    def translate(self,\n                  out_shape,\n                  offset,\n                  direction='horizontal',\n                  fill_val=None,\n                  interpolation=None):\n        \"\"\"Translate the PolygonMasks.\n\n        Example:\n            >>> self = PolygonMasks.random(dtype=np.int)\n            >>> out_shape = (self.height, self.width)\n            >>> new = self.translate(out_shape, 4., direction='horizontal')\n            >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])\n            >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4)  # noqa: E501\n        \"\"\"\n        assert fill_val is None or fill_val == 0, 'Here fill_val is not '\\\n            f'used, and defaultly should be None or 0. 
got {fill_val}.'\n        if len(self.masks) == 0:\n            translated_masks = PolygonMasks([], *out_shape)\n        else:\n            translated_masks = []\n            for poly_per_obj in self.masks:\n                translated_poly_per_obj = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    if direction == 'horizontal':\n                        p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])\n                    elif direction == 'vertical':\n                        p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])\n                    translated_poly_per_obj.append(p)\n                translated_masks.append(translated_poly_per_obj)\n            translated_masks = PolygonMasks(translated_masks, *out_shape)\n        return translated_masks\n\n    def shear(self,\n              out_shape,\n              magnitude,\n              direction='horizontal',\n              border_value=0,\n              interpolation='bilinear'):\n        \"\"\"See :func:`BaseInstanceMasks.shear`.\"\"\"\n        if len(self.masks) == 0:\n            sheared_masks = PolygonMasks([], *out_shape)\n        else:\n            sheared_masks = []\n            if direction == 'horizontal':\n                shear_matrix = np.stack([[1, magnitude],\n                                         [0, 1]]).astype(np.float32)\n            elif direction == 'vertical':\n                shear_matrix = np.stack([[1, 0], [magnitude,\n                                                  1]]).astype(np.float32)\n            for poly_per_obj in self.masks:\n                sheared_poly = []\n                for p in poly_per_obj:\n                    p = np.stack([p[0::2], p[1::2]], axis=0)  # [2, n]\n                    new_coords = np.matmul(shear_matrix, p)  # [2, n]\n                    new_coords[0, :] = np.clip(new_coords[0, :], 0,\n                                               out_shape[1])\n                    new_coords[1, :] = np.clip(new_coords[1, :], 0,\n                                               out_shape[0])\n                    sheared_poly.append(\n                        new_coords.transpose((1, 0)).reshape(-1))\n                sheared_masks.append(sheared_poly)\n            sheared_masks = PolygonMasks(sheared_masks, *out_shape)\n        return sheared_masks\n\n    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):\n        \"\"\"See :func:`BaseInstanceMasks.rotate`.\"\"\"\n        if len(self.masks) == 0:\n            rotated_masks = PolygonMasks([], *out_shape)\n        else:\n            rotated_masks = []\n            rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)\n            for poly_per_obj in self.masks:\n                rotated_poly = []\n                for p in poly_per_obj:\n                    p = p.copy()\n                    coords = np.stack([p[0::2], p[1::2]], axis=1)  # [n, 2]\n                    # pad 1 to convert from format [x, y] to homogeneous\n                    # coordinates format [x, y, 1]\n                    coords = np.concatenate(\n                        (coords, np.ones((coords.shape[0], 1), coords.dtype)),\n                        axis=1)  # [n, 3]\n                    rotated_coords = np.matmul(\n                        rotate_matrix[None, :, :],\n                        coords[:, :, None])[..., 0]  # [n, 2, 1] -> [n, 2]\n                    rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,\n                                                   out_shape[1])\n                    
rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,\n                                                   out_shape[0])\n                    rotated_poly.append(rotated_coords.reshape(-1))\n                rotated_masks.append(rotated_poly)\n            rotated_masks = PolygonMasks(rotated_masks, *out_shape)\n        return rotated_masks\n\n    def to_bitmap(self):\n        \"\"\"convert polygon masks to bitmap masks.\"\"\"\n        bitmap_masks = self.to_ndarray()\n        return BitmapMasks(bitmap_masks, self.height, self.width)\n\n    @property\n    def areas(self):\n        \"\"\"Compute areas of masks.\n\n        This func is modified from `detectron2\n        <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.\n        The function only works with Polygons using the shoelace formula.\n\n        Return:\n            ndarray: areas of each instance\n        \"\"\"  # noqa: W501\n        area = []\n        for polygons_per_obj in self.masks:\n            area_per_obj = 0\n            for p in polygons_per_obj:\n                area_per_obj += self._polygon_area(p[0::2], p[1::2])\n            area.append(area_per_obj)\n        return np.asarray(area)\n\n    def _polygon_area(self, x, y):\n        \"\"\"Compute the area of a component of a polygon.\n\n        Using the shoelace formula:\n        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n        Args:\n            x (ndarray): x coordinates of the component\n            y (ndarray): y coordinates of the component\n\n        Return:\n            float: the are of the component\n        \"\"\"  # noqa: 501\n        return 0.5 * np.abs(\n            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n    def to_ndarray(self):\n        \"\"\"Convert masks to the format of ndarray.\"\"\"\n        if len(self.masks) == 0:\n            return np.empty((0, self.height, self.width), dtype=np.uint8)\n        bitmap_masks = []\n        for poly_per_obj in self.masks:\n            bitmap_masks.append(\n                polygon_to_bitmap(poly_per_obj, self.height, self.width))\n        return np.stack(bitmap_masks)\n\n    def to_tensor(self, dtype, device):\n        \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n        if len(self.masks) == 0:\n            return torch.empty((0, self.height, self.width),\n                               dtype=dtype,\n                               device=device)\n        ndarray_masks = self.to_ndarray()\n        return torch.tensor(ndarray_masks, dtype=dtype, device=device)\n\n    @classmethod\n    def random(cls,\n               num_masks=3,\n               height=32,\n               width=32,\n               n_verts=5,\n               dtype=np.float32,\n               rng=None):\n        \"\"\"Generate random polygon masks for demo / testing purposes.\n\n        Adapted from [1]_\n\n        References:\n            .. 
[1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379  # noqa: E501\n\n        Example:\n            >>> from mmdet.core.mask.structures import PolygonMasks\n            >>> self = PolygonMasks.random()\n            >>> print('self = {}'.format(self))\n        \"\"\"\n        from mmdet.utils.util_random import ensure_rng\n        rng = ensure_rng(rng)\n\n        def _gen_polygon(n, irregularity, spikeyness):\n            \"\"\"Creates the polygon by sampling points on a circle around the\n            centre.  Random noise is added by varying the angular spacing\n            between sequential points, and by varying the radial distance of\n            each point from the centre.\n\n            Based on original code by Mike Ounsworth\n\n            Args:\n                n (int): number of vertices\n                irregularity (float): [0,1] indicating how much variance there\n                    is in the angular spacing of vertices. [0,1] will map to\n                    [0, 2pi/numberOfVerts]\n                spikeyness (float): [0,1] indicating how much variance there is\n                    in each vertex from the circle of radius aveRadius. [0,1]\n                    will map to [0, aveRadius]\n\n            Returns:\n                a list of vertices, in CCW order.\n            \"\"\"\n            from scipy.stats import truncnorm\n\n            # Generate around the unit circle\n            cx, cy = (0.0, 0.0)\n            radius = 1\n\n            tau = np.pi * 2\n\n            irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n\n            spikeyness = np.clip(spikeyness, 1e-9, 1)\n\n            # generate n angle steps\n            lower = (tau / n) - irregularity\n            upper = (tau / n) + irregularity\n            angle_steps = rng.uniform(lower, upper, n)\n\n            # normalize the steps so that point 0 and point n+1 are the same\n            k = angle_steps.sum() / (2 * np.pi)\n            angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)\n\n            # Convert high and low values to be wrt the standard normal range\n            # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html\n            low = 0\n            high = 2 * radius\n            mean = radius\n            std = spikeyness\n            a = (low - mean) / std\n            b = (high - mean) / std\n            tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)\n\n            # now generate the points\n            radii = tnorm.rvs(n, random_state=rng)\n            x_pts = cx + radii * np.cos(angles)\n            y_pts = cy + radii * np.sin(angles)\n\n            points = np.hstack([x_pts[:, None], y_pts[:, None]])\n\n            # Scale to 0-1 space\n            points = points - points.min(axis=0)\n            points = points / points.max(axis=0)\n\n            # Randomly place within 0-1 space\n            points = points * (rng.rand() * .8 + .2)\n            min_pt = points.min(axis=0)\n            max_pt = points.max(axis=0)\n\n            high = (1 - max_pt)\n            low = (0 - min_pt)\n            offset = (rng.rand(2) * (high - low)) + low\n            points = points + offset\n            return points\n\n        def _order_vertices(verts):\n            \"\"\"\n            References:\n                https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise\n            \"\"\"\n            mlat = verts.T[0].sum() / len(verts)\n            
mlng = verts.T[1].sum() / len(verts)\n\n            tau = np.pi * 2\n            angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +\n                     tau) % tau\n            sortx = angle.argsort()\n            verts = verts.take(sortx, axis=0)\n            return verts\n\n        # Generate a random exterior for each requested mask\n        masks = []\n        for _ in range(num_masks):\n            exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))\n            exterior = (exterior * [(width, height)]).astype(dtype)\n            masks.append([exterior.ravel()])\n\n        self = cls(masks, height, width)\n        return self\n\n    def get_bboxes(self):\n        num_masks = len(self)\n        boxes = np.zeros((num_masks, 4), dtype=np.float32)\n        for idx, poly_per_obj in enumerate(self.masks):\n            # simply use a number that is big enough for comparison with\n            # coordinates\n            xy_min = np.array([self.width * 2, self.height * 2],\n                              dtype=np.float32)\n            xy_max = np.zeros(2, dtype=np.float32)\n            for p in poly_per_obj:\n                xy = np.array(p).reshape(-1, 2).astype(np.float32)\n                xy_min = np.minimum(xy_min, np.min(xy, axis=0))\n                xy_max = np.maximum(xy_max, np.max(xy, axis=0))\n            boxes[idx, :2] = xy_min\n            boxes[idx, 2:] = xy_max\n\n        return boxes\n\n\ndef polygon_to_bitmap(polygons, height, width):\n    \"\"\"Convert masks from the form of polygons to bitmaps.\n\n    Args:\n        polygons (list[ndarray]): masks in polygon representation\n        height (int): mask height\n        width (int): mask width\n\n    Return:\n        ndarray: the converted masks in bitmap representation\n    \"\"\"\n    rles = maskUtils.frPyObjects(polygons, height, width)\n    rle = maskUtils.merge(rles)\n    bitmap_mask = maskUtils.decode(rle).astype(bool)\n    return bitmap_mask\n\n\ndef bitmap_to_polygon(bitmap):\n    \"\"\"Convert masks from the form of bitmaps to polygons.\n\n    Args:\n        bitmap (ndarray): masks in bitmap representation.\n\n    Return:\n        list[ndarray]: the converted mask in polygon representation.\n        bool: whether the mask has holes.\n    \"\"\"\n    bitmap = np.ascontiguousarray(bitmap).astype(np.uint8)\n    # cv2.RETR_CCOMP: retrieves all of the contours and organizes them\n    #   into a two-level hierarchy. At the top level, there are external\n    #   boundaries of the components. At the second level, there are\n    #   boundaries of the holes. If there is another contour inside a hole\n    #   of a connected component, it is still put at the top level.\n    # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points.\n    outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n    contours = outs[-2]\n    hierarchy = outs[-1]\n    if hierarchy is None:\n        return [], False\n    # hierarchy[i]: 4 elements, for the indexes of next, previous,\n    # parent, or nested contours. If there is no corresponding contour,\n    # it will be -1.\n    with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any()\n    contours = [c.reshape(-1, 2) for c in contours]\n    return contours, with_hole\n"
  },
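The PolygonMasks helpers listed above (`random`, `areas`, `to_ndarray`, `get_bboxes`) and the module-level polygon/bitmap converters can be exercised as in the following minimal sketch; it assumes `scipy` and `pycocotools` are installed (they are imported by the code above) and uses arbitrary demo sizes, so it is an illustration rather than a snippet from the repo.

```python
# Minimal sketch exercising the PolygonMasks utilities above
# (assumes scipy and pycocotools are available; sizes are arbitrary).
from mmdet.core.mask.structures import PolygonMasks, bitmap_to_polygon

masks = PolygonMasks.random(num_masks=2, height=32, width=32)  # random demo polygons
print(masks.areas)                  # per-instance shoelace areas, shape (2,)
bitmaps = masks.to_ndarray()        # (2, 32, 32) boolean bitmaps via pycocotools
contours, with_hole = bitmap_to_polygon(bitmaps[0])  # back to contour polygons
boxes = masks.get_bboxes()          # (2, 4) tight [x1, y1, x2, y2] boxes
```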
  {
    "path": "mmdet/core/mask/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as mask_util\nimport torch\n\n\ndef split_combined_polys(polys, poly_lens, polys_per_mask):\n    \"\"\"Split the combined 1-D polys into masks.\n\n    A mask is represented as a list of polys, and a poly is represented as\n    a 1-D array. In dataset, all masks are concatenated into a single 1-D\n    tensor. Here we need to split the tensor into original representations.\n\n    Args:\n        polys (list): a list (length = image num) of 1-D tensors\n        poly_lens (list): a list (length = image num) of poly length\n        polys_per_mask (list): a list (length = image num) of poly number\n            of each mask\n\n    Returns:\n        list: a list (length = image num) of list (length = mask num) of \\\n            list (length = poly num) of numpy array.\n    \"\"\"\n    mask_polys_list = []\n    for img_id in range(len(polys)):\n        polys_single = polys[img_id]\n        polys_lens_single = poly_lens[img_id].tolist()\n        polys_per_mask_single = polys_per_mask[img_id].tolist()\n\n        split_polys = mmcv.slice_list(polys_single, polys_lens_single)\n        mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)\n        mask_polys_list.append(mask_polys)\n    return mask_polys_list\n\n\n# TODO: move this function to more proper place\ndef encode_mask_results(mask_results):\n    \"\"\"Encode bitmap mask to RLE code.\n\n    Args:\n        mask_results (list | tuple[list]): bitmap mask results.\n            In mask scoring rcnn, mask_results is a tuple of (segm_results,\n            segm_cls_score).\n\n    Returns:\n        list | tuple: RLE encoded mask.\n    \"\"\"\n    if isinstance(mask_results, tuple):  # mask scoring\n        cls_segms, cls_mask_scores = mask_results\n    else:\n        cls_segms = mask_results\n    num_classes = len(cls_segms)\n    encoded_mask_results = [[] for _ in range(num_classes)]\n    for i in range(len(cls_segms)):\n        for cls_segm in cls_segms[i]:\n            encoded_mask_results[i].append(\n                mask_util.encode(\n                    np.array(\n                        cls_segm[:, :, np.newaxis], order='F',\n                        dtype='uint8'))[0])  # encoded with RLE\n    if isinstance(mask_results, tuple):\n        return encoded_mask_results, cls_mask_scores\n    else:\n        return encoded_mask_results\n\n\ndef mask2bbox(masks):\n    \"\"\"Obtain tight bounding boxes of binary masks.\n\n    Args:\n        masks (Tensor): Binary mask of shape (n, h, w).\n\n    Returns:\n        Tensor: Bboxe with shape (n, 4) of \\\n            positive region in binary mask.\n    \"\"\"\n    N = masks.shape[0]\n    bboxes = masks.new_zeros((N, 4), dtype=torch.float32)\n    x_any = torch.any(masks, dim=1)\n    y_any = torch.any(masks, dim=2)\n    for i in range(N):\n        x = torch.where(x_any[i, :])[0]\n        y = torch.where(y_any[i, :])[0]\n        if len(x) > 0 and len(y) > 0:\n            bboxes[i, :] = bboxes.new_tensor(\n                [x[0], y[0], x[-1] + 1, y[-1] + 1])\n\n    return bboxes\n"
  },
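A hedged sketch of `mask2bbox` from the file above; the toy masks and the expected output in the comment are illustrative, not taken from the repo.

```python
# Sketch of mask2bbox on two toy binary masks (values are made up).
import torch
from mmdet.core.mask.utils import mask2bbox

masks = torch.zeros((2, 8, 8), dtype=torch.bool)
masks[0, 2:5, 1:4] = True          # instance 0: rows 2-4, cols 1-3
masks[1, 0:3, 5:8] = True          # instance 1: rows 0-2, cols 5-7
print(mask2bbox(masks))            # tensor([[1., 2., 4., 5.], [5., 0., 8., 3.]])
```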
  {
    "path": "mmdet/core/optimizers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import OPTIMIZER_BUILDERS, build_optimizer\nfrom .layer_decay_optimizer_constructor import \\\n    LearningRateDecayOptimizerConstructor\n\n__all__ = [\n    'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS',\n    'build_optimizer'\n]\n"
  },
  {
    "path": "mmdet/core/optimizers/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nfrom mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS\nfrom mmcv.utils import Registry, build_from_cfg\n\nOPTIMIZER_BUILDERS = Registry(\n    'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS)\n\n\ndef build_optimizer_constructor(cfg):\n    constructor_type = cfg.get('type')\n    if constructor_type in OPTIMIZER_BUILDERS:\n        return build_from_cfg(cfg, OPTIMIZER_BUILDERS)\n    elif constructor_type in MMCV_OPTIMIZER_BUILDERS:\n        return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS)\n    else:\n        raise KeyError(f'{constructor_type} is not registered '\n                       'in the optimizer builder registry.')\n\n\ndef build_optimizer(model, cfg):\n    optimizer_cfg = copy.deepcopy(cfg)\n    constructor_type = optimizer_cfg.pop('constructor',\n                                         'DefaultOptimizerConstructor')\n    paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None)\n    optim_constructor = build_optimizer_constructor(\n        dict(\n            type=constructor_type,\n            optimizer_cfg=optimizer_cfg,\n            paramwise_cfg=paramwise_cfg))\n    optimizer = optim_constructor(model)\n    return optimizer\n"
  },
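A sketch of how the builder above might be called; the SGD hyper-parameters and the stand-in module are illustrative assumptions, not taken from any config in this repo.

```python
# Illustrative call to build_optimizer (hypothetical hyper-parameters).
import torch.nn as nn
from mmdet.core.optimizers import build_optimizer

model = nn.Conv2d(3, 8, 3)  # stand-in for a detector
optimizer_cfg = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer = build_optimizer(model, optimizer_cfg)
# 'constructor' defaults to mmcv's DefaultOptimizerConstructor when not given.
```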
  {
    "path": "mmdet/core/optimizers/layer_decay_optimizer_constructor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport json\n\nfrom mmcv.runner import DefaultOptimizerConstructor, get_dist_info\n\nfrom mmdet.utils import get_root_logger\nfrom .builder import OPTIMIZER_BUILDERS\n\n\ndef get_layer_id_for_convnext(var_name, max_layer_id):\n    \"\"\"Get the layer id to set the different learning rates in ``layer_wise``\n    decay_type.\n\n    Args:\n        var_name (str): The key of the model.\n        max_layer_id (int): Maximum layer id.\n\n    Returns:\n        int: The id number corresponding to different learning rate in\n        ``LearningRateDecayOptimizerConstructor``.\n    \"\"\"\n\n    if var_name in ('backbone.cls_token', 'backbone.mask_token',\n                    'backbone.pos_embed'):\n        return 0\n    elif var_name.startswith('backbone.downsample_layers'):\n        stage_id = int(var_name.split('.')[2])\n        if stage_id == 0:\n            layer_id = 0\n        elif stage_id == 1:\n            layer_id = 2\n        elif stage_id == 2:\n            layer_id = 3\n        elif stage_id == 3:\n            layer_id = max_layer_id\n        return layer_id\n    elif var_name.startswith('backbone.stages'):\n        stage_id = int(var_name.split('.')[2])\n        block_id = int(var_name.split('.')[3])\n        if stage_id == 0:\n            layer_id = 1\n        elif stage_id == 1:\n            layer_id = 2\n        elif stage_id == 2:\n            layer_id = 3 + block_id // 3\n        elif stage_id == 3:\n            layer_id = max_layer_id\n        return layer_id\n    else:\n        return max_layer_id + 1\n\n\ndef get_stage_id_for_convnext(var_name, max_stage_id):\n    \"\"\"Get the stage id to set the different learning rates in ``stage_wise``\n    decay_type.\n\n    Args:\n        var_name (str): The key of the model.\n        max_stage_id (int): Maximum stage id.\n\n    Returns:\n        int: The id number corresponding to different learning rate in\n        ``LearningRateDecayOptimizerConstructor``.\n    \"\"\"\n\n    if var_name in ('backbone.cls_token', 'backbone.mask_token',\n                    'backbone.pos_embed'):\n        return 0\n    elif var_name.startswith('backbone.downsample_layers'):\n        return 0\n    elif var_name.startswith('backbone.stages'):\n        stage_id = int(var_name.split('.')[2])\n        return stage_id + 1\n    else:\n        return max_stage_id - 1\n\n\n@OPTIMIZER_BUILDERS.register_module()\nclass LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):\n    # Different learning rates are set for different layers of backbone.\n    # Note: Currently, this optimizer constructor is built for ConvNeXt.\n\n    def add_params(self, params, module, **kwargs):\n        \"\"\"Add all parameters of module to the params list.\n\n        The parameters of the given module will be added to the list of param\n        groups, with specific rules defined by paramwise_cfg.\n\n        Args:\n            params (list[dict]): A list of param groups, it will be modified\n                in place.\n            module (nn.Module): The module to be added.\n        \"\"\"\n        logger = get_root_logger()\n\n        parameter_groups = {}\n        logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}')\n        num_layers = self.paramwise_cfg.get('num_layers') + 2\n        decay_rate = self.paramwise_cfg.get('decay_rate')\n        decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise')\n        logger.info('Build LearningRateDecayOptimizerConstructor  '\n                    
f'{decay_type} {decay_rate} - {num_layers}')\n        weight_decay = self.base_wd\n        for name, param in module.named_parameters():\n            if not param.requires_grad:\n                continue  # frozen weights\n            if len(param.shape) == 1 or name.endswith('.bias') or name in (\n                    'pos_embed', 'cls_token'):\n                group_name = 'no_decay'\n                this_weight_decay = 0.\n            else:\n                group_name = 'decay'\n                this_weight_decay = weight_decay\n            if 'layer_wise' in decay_type:\n                if 'ConvNeXt' in module.backbone.__class__.__name__:\n                    layer_id = get_layer_id_for_convnext(\n                        name, self.paramwise_cfg.get('num_layers'))\n                    logger.info(f'set param {name} as id {layer_id}')\n                else:\n                    raise NotImplementedError()\n            elif decay_type == 'stage_wise':\n                if 'ConvNeXt' in module.backbone.__class__.__name__:\n                    layer_id = get_stage_id_for_convnext(name, num_layers)\n                    logger.info(f'set param {name} as id {layer_id}')\n                else:\n                    raise NotImplementedError()\n            group_name = f'layer_{layer_id}_{group_name}'\n\n            if group_name not in parameter_groups:\n                scale = decay_rate**(num_layers - layer_id - 1)\n\n                parameter_groups[group_name] = {\n                    'weight_decay': this_weight_decay,\n                    'params': [],\n                    'param_names': [],\n                    'lr_scale': scale,\n                    'group_name': group_name,\n                    'lr': scale * self.base_lr,\n                }\n\n            parameter_groups[group_name]['params'].append(param)\n            parameter_groups[group_name]['param_names'].append(name)\n        rank, _ = get_dist_info()\n        if rank == 0:\n            to_display = {}\n            for key in parameter_groups:\n                to_display[key] = {\n                    'param_names': parameter_groups[key]['param_names'],\n                    'lr_scale': parameter_groups[key]['lr_scale'],\n                    'lr': parameter_groups[key]['lr'],\n                    'weight_decay': parameter_groups[key]['weight_decay'],\n                }\n            logger.info(f'Param groups = {json.dumps(to_display, indent=2)}')\n        params.extend(parameter_groups.values())\n"
  },
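For context, a config that routes through the constructor above for a ConvNeXt backbone would look roughly like the following; the decay rate, layer count and AdamW settings are illustrative assumptions, not copied from a shipped config.

```python
# Rough optimizer config for LearningRateDecayOptimizerConstructor
# (values are illustrative assumptions for a ConvNeXt backbone).
optimizer = dict(
    constructor='LearningRateDecayOptimizerConstructor',
    type='AdamW',
    lr=0.0002,
    weight_decay=0.05,
    paramwise_cfg=dict(
        decay_rate=0.7,           # lr scale = decay_rate ** (num_layers - layer_id - 1)
        decay_type='layer_wise',  # or 'stage_wise'
        num_layers=12))           # backbone depth; the constructor adds 2 internally
```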
  {
    "path": "mmdet/core/post_processing/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bbox_nms import fast_nms, multiclass_nms\nfrom .matrix_nms import mask_matrix_nms\nfrom .merge_augs import (merge_aug_bboxes, merge_aug_masks,\n                         merge_aug_proposals, merge_aug_scores)\n\n__all__ = [\n    'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes',\n    'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms'\n]\n"
  },
  {
    "path": "mmdet/core/post_processing/bbox_nms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops.nms import batched_nms\n\nfrom mmdet.core.bbox.iou_calculators import bbox_overlaps\n\n\ndef multiclass_nms(multi_bboxes,\n                   multi_scores,\n                   score_thr,\n                   nms_cfg,\n                   max_num=-1,\n                   score_factors=None,\n                   return_inds=False):\n    \"\"\"NMS for multi-class bboxes.\n\n    Args:\n        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n        multi_scores (Tensor): shape (n, #class), where the last column\n            contains scores of the background class, but this will be ignored.\n        score_thr (float): bbox threshold, bboxes with scores lower than it\n            will not be considered.\n        nms_cfg (dict): a dict that contains the arguments of nms operations\n        max_num (int, optional): if there are more than max_num bboxes after\n            NMS, only top max_num will be kept. Default to -1.\n        score_factors (Tensor, optional): The factors multiplied to scores\n            before applying NMS. Default to None.\n        return_inds (bool, optional): Whether return the indices of kept\n            bboxes. Default to False.\n\n    Returns:\n        tuple: (dets, labels, indices (optional)), tensors of shape (k, 5),\n            (k), and (k). Dets are boxes with scores. Labels are 0-based.\n    \"\"\"\n    num_classes = multi_scores.size(1) - 1\n    # exclude background category\n    if multi_bboxes.shape[1] > 4:\n        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)\n    else:\n        bboxes = multi_bboxes[:, None].expand(\n            multi_scores.size(0), num_classes, 4)\n\n    scores = multi_scores[:, :-1]\n\n    labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)\n    labels = labels.view(1, -1).expand_as(scores)\n\n    bboxes = bboxes.reshape(-1, 4)\n    scores = scores.reshape(-1)\n    labels = labels.reshape(-1)\n\n    if not torch.onnx.is_in_onnx_export():\n        # NonZero not supported  in TensorRT\n        # remove low scoring boxes\n        valid_mask = scores > score_thr\n    # multiply score_factor after threshold to preserve more bboxes, improve\n    # mAP by 1% for YOLOv3\n    if score_factors is not None:\n        # expand the shape to match original shape of score\n        score_factors = score_factors.view(-1, 1).expand(\n            multi_scores.size(0), num_classes)\n        score_factors = score_factors.reshape(-1)\n        scores = scores * score_factors\n\n    if not torch.onnx.is_in_onnx_export():\n        # NonZero not supported  in TensorRT\n        inds = valid_mask.nonzero(as_tuple=False).squeeze(1)\n        bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]\n    else:\n        # TensorRT NMS plugin has invalid output filled with -1\n        # add dummy data to make detection output correct.\n        bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)\n        scores = torch.cat([scores, scores.new_zeros(1)], dim=0)\n        labels = torch.cat([labels, labels.new_zeros(1)], dim=0)\n\n    if bboxes.numel() == 0:\n        if torch.onnx.is_in_onnx_export():\n            raise RuntimeError('[ONNX Error] Can not record NMS '\n                               'as it has not been executed this time')\n        dets = torch.cat([bboxes, scores[:, None]], -1)\n        if return_inds:\n            return dets, labels, inds\n        else:\n            return dets, labels\n\n    dets, keep = 
batched_nms(bboxes, scores, labels, nms_cfg)\n\n    if max_num > 0:\n        dets = dets[:max_num]\n        keep = keep[:max_num]\n\n    if return_inds:\n        return dets, labels[keep], inds[keep]\n    else:\n        return dets, labels[keep]\n\n\ndef fast_nms(multi_bboxes,\n             multi_scores,\n             multi_coeffs,\n             score_thr,\n             iou_thr,\n             top_k,\n             max_num=-1):\n    \"\"\"Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.\n\n    Fast NMS allows already-removed detections to suppress other detections so\n    that every instance can be decided to be kept or discarded in parallel,\n    which is not possible in traditional NMS. This relaxation allows us to\n    implement Fast NMS entirely in standard GPU-accelerated matrix operations.\n\n    Args:\n        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)\n        multi_scores (Tensor): shape (n, #class+1), where the last column\n            contains scores of the background class, but this will be ignored.\n        multi_coeffs (Tensor): shape (n, #class*coeffs_dim).\n        score_thr (float): bbox threshold, bboxes with scores lower than it\n            will not be considered.\n        iou_thr (float): IoU threshold to be considered as conflicted.\n        top_k (int): if there are more than top_k bboxes before NMS,\n            only top top_k will be kept.\n        max_num (int): if there are more than max_num bboxes after NMS,\n            only top max_num will be kept. If -1, keep all the bboxes.\n            Default: -1.\n\n    Returns:\n        tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1),\n            and (k, coeffs_dim). Dets are boxes with scores.\n            Labels are 0-based.\n    \"\"\"\n\n    scores = multi_scores[:, :-1].t()  # [#class, n]\n    scores, idx = scores.sort(1, descending=True)\n\n    idx = idx[:, :top_k].contiguous()\n    scores = scores[:, :top_k]  # [#class, topk]\n    num_classes, num_dets = idx.size()\n    boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4)\n    coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1)\n\n    iou = bbox_overlaps(boxes, boxes)  # [#class, topk, topk]\n    iou.triu_(diagonal=1)\n    iou_max, _ = iou.max(dim=1)\n\n    # Now just filter out the ones higher than the threshold\n    keep = iou_max <= iou_thr\n\n    # Second thresholding introduces 0.2 mAP gain at negligible time cost\n    keep *= scores > score_thr\n\n    # Assign each kept detection to its corresponding class\n    classes = torch.arange(\n        num_classes, device=boxes.device)[:, None].expand_as(keep)\n    classes = classes[keep]\n\n    boxes = boxes[keep]\n    coeffs = coeffs[keep]\n    scores = scores[keep]\n\n    # Only keep the top max_num highest scores across all classes\n    scores, idx = scores.sort(0, descending=True)\n    if max_num > 0:\n        idx = idx[:max_num]\n        scores = scores[:max_num]\n\n    classes = classes[idx]\n    boxes = boxes[idx]\n    coeffs = coeffs[idx]\n\n    cls_dets = torch.cat([boxes, scores[:, None]], dim=1)\n    return cls_dets, classes, coeffs\n"
  },
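A small sketch of `multiclass_nms` on made-up boxes (it relies on mmcv's compiled `batched_nms` op); the numbers below are arbitrary.

```python
# Sketch of multiclass_nms on toy inputs (needs mmcv's NMS op).
import torch
from mmdet.core.post_processing import multiclass_nms

multi_bboxes = torch.tensor([[10., 10., 50., 50.],
                             [12., 12., 52., 52.]])        # (n, 4), shared by all classes
multi_scores = torch.tensor([[0.90, 0.10, 0.05],
                             [0.60, 0.20, 0.05]])          # (n, #class + 1); last col = background
dets, labels = multiclass_nms(
    multi_bboxes,
    multi_scores,
    score_thr=0.3,
    nms_cfg=dict(type='nms', iou_threshold=0.5),
    max_num=100)
# dets: (k, 5) [x1, y1, x2, y2, score]; labels: (k,) 0-based class ids
```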
  {
    "path": "mmdet/core/post_processing/matrix_nms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef mask_matrix_nms(masks,\n                    labels,\n                    scores,\n                    filter_thr=-1,\n                    nms_pre=-1,\n                    max_num=-1,\n                    kernel='gaussian',\n                    sigma=2.0,\n                    mask_area=None):\n    \"\"\"Matrix NMS for multi-class masks.\n\n    Args:\n        masks (Tensor): Has shape (num_instances, h, w)\n        labels (Tensor): Labels of corresponding masks,\n            has shape (num_instances,).\n        scores (Tensor): Mask scores of corresponding masks,\n            has shape (num_instances).\n        filter_thr (float): Score threshold to filter the masks\n            after matrix nms. Default: -1, which means do not\n            use filter_thr.\n        nms_pre (int): The max number of instances to do the matrix nms.\n            Default: -1, which means do not use nms_pre.\n        max_num (int, optional): If there are more than max_num masks after\n            matrix, only top max_num will be kept. Default: -1, which means\n            do not use max_num.\n        kernel (str): 'linear' or 'gaussian'.\n        sigma (float): std in gaussian method.\n        mask_area (Tensor): The sum of seg_masks.\n\n    Returns:\n        tuple(Tensor): Processed mask results.\n\n            - scores (Tensor): Updated scores, has shape (n,).\n            - labels (Tensor): Remained labels, has shape (n,).\n            - masks (Tensor): Remained masks, has shape (n, w, h).\n            - keep_inds (Tensor): The indices number of\n                the remaining mask in the input mask, has shape (n,).\n    \"\"\"\n    assert len(labels) == len(masks) == len(scores)\n    if len(labels) == 0:\n        return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(\n            0, *masks.shape[-2:]), labels.new_zeros(0)\n    if mask_area is None:\n        mask_area = masks.sum((1, 2)).float()\n    else:\n        assert len(masks) == len(mask_area)\n\n    # sort and keep top nms_pre\n    scores, sort_inds = torch.sort(scores, descending=True)\n\n    keep_inds = sort_inds\n    if nms_pre > 0 and len(sort_inds) > nms_pre:\n        sort_inds = sort_inds[:nms_pre]\n        keep_inds = keep_inds[:nms_pre]\n        scores = scores[:nms_pre]\n    masks = masks[sort_inds]\n    mask_area = mask_area[sort_inds]\n    labels = labels[sort_inds]\n\n    num_masks = len(labels)\n    flatten_masks = masks.reshape(num_masks, -1).float()\n    # inter.\n    inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))\n    expanded_mask_area = mask_area.expand(num_masks, num_masks)\n    # Upper triangle iou matrix.\n    iou_matrix = (inter_matrix /\n                  (expanded_mask_area + expanded_mask_area.transpose(1, 0) -\n                   inter_matrix)).triu(diagonal=1)\n    # label_specific matrix.\n    expanded_labels = labels.expand(num_masks, num_masks)\n    # Upper triangle label matrix.\n    label_matrix = (expanded_labels == expanded_labels.transpose(\n        1, 0)).triu(diagonal=1)\n\n    # IoU compensation\n    compensate_iou, _ = (iou_matrix * label_matrix).max(0)\n    compensate_iou = compensate_iou.expand(num_masks,\n                                           num_masks).transpose(1, 0)\n\n    # IoU decay\n    decay_iou = iou_matrix * label_matrix\n\n    # Calculate the decay_coefficient\n    if kernel == 'gaussian':\n        decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))\n        compensate_matrix = 
torch.exp(-1 * sigma * (compensate_iou**2))\n        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)\n    elif kernel == 'linear':\n        decay_matrix = (1 - decay_iou) / (1 - compensate_iou)\n        decay_coefficient, _ = decay_matrix.min(0)\n    else:\n        raise NotImplementedError(\n            f'{kernel} kernel is not supported in matrix nms!')\n    # update the score.\n    scores = scores * decay_coefficient\n\n    if filter_thr > 0:\n        keep = scores >= filter_thr\n        keep_inds = keep_inds[keep]\n        if not keep.any():\n            return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(\n                0, *masks.shape[-2:]), labels.new_zeros(0)\n        masks = masks[keep]\n        scores = scores[keep]\n        labels = labels[keep]\n\n    # sort and keep top max_num\n    scores, sort_inds = torch.sort(scores, descending=True)\n    keep_inds = keep_inds[sort_inds]\n    if max_num > 0 and len(sort_inds) > max_num:\n        sort_inds = sort_inds[:max_num]\n        keep_inds = keep_inds[:max_num]\n        scores = scores[:max_num]\n    masks = masks[sort_inds]\n    labels = labels[sort_inds]\n\n    return scores, labels, masks, keep_inds\n"
  },
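A sketch of `mask_matrix_nms` with three toy masks; shapes and scores are invented for illustration only.

```python
# Sketch of mask_matrix_nms on toy masks (arbitrary values).
import torch
from mmdet.core.post_processing import mask_matrix_nms

masks = torch.zeros((3, 16, 16), dtype=torch.bool)
masks[0, 2:10, 2:10] = True
masks[1, 3:11, 3:11] = True        # overlaps mask 0 with the same label -> score decayed
masks[2, 0:4, 12:16] = True
labels = torch.tensor([0, 0, 1])
scores = torch.tensor([0.9, 0.8, 0.7])
scores, labels, masks, keep_inds = mask_matrix_nms(
    masks, labels, scores, kernel='gaussian', sigma=2.0, filter_thr=0.05)
```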
  {
    "path": "mmdet/core/post_processing/merge_augs.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport numpy as np\nimport torch\nfrom mmcv import ConfigDict\nfrom mmcv.ops import nms\n\nfrom ..bbox import bbox_mapping_back\n\n\ndef merge_aug_proposals(aug_proposals, img_metas, cfg):\n    \"\"\"Merge augmented proposals (multiscale, flip, etc.)\n\n    Args:\n        aug_proposals (list[Tensor]): proposals from different testing\n            schemes, shape (n, 5). Note that they are not rescaled to the\n            original image size.\n\n        img_metas (list[dict]): list of image info dict where each dict has:\n            'img_shape', 'scale_factor', 'flip', and may also contain\n            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n            For details on the values of these keys see\n            `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n        cfg (dict): rpn test config.\n\n    Returns:\n        Tensor: shape (n, 4), proposals corresponding to original image scale.\n    \"\"\"\n\n    cfg = copy.deepcopy(cfg)\n\n    # deprecate arguments warning\n    if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:\n        warnings.warn(\n            'In rpn_proposal or test_cfg, '\n            'nms_thr has been moved to a dict named nms as '\n            'iou_threshold, max_num has been renamed as max_per_img, '\n            'name of original arguments and the way to specify '\n            'iou_threshold of NMS will be deprecated.')\n    if 'nms' not in cfg:\n        cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))\n    if 'max_num' in cfg:\n        if 'max_per_img' in cfg:\n            assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \\\n                f'max_per_img at the same time, but get {cfg.max_num} ' \\\n                f'and {cfg.max_per_img} respectively' \\\n                f'Please delete max_num which will be deprecated.'\n        else:\n            cfg.max_per_img = cfg.max_num\n    if 'nms_thr' in cfg:\n        assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \\\n            f'iou_threshold in nms and ' \\\n            f'nms_thr at the same time, but get ' \\\n            f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \\\n            f' respectively. 
Please delete the nms_thr ' \\\n            f'which will be deprecated.'\n\n    recovered_proposals = []\n    for proposals, img_info in zip(aug_proposals, img_metas):\n        img_shape = img_info['img_shape']\n        scale_factor = img_info['scale_factor']\n        flip = img_info['flip']\n        flip_direction = img_info['flip_direction']\n        _proposals = proposals.clone()\n        _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape,\n                                              scale_factor, flip,\n                                              flip_direction)\n        recovered_proposals.append(_proposals)\n    aug_proposals = torch.cat(recovered_proposals, dim=0)\n    merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(),\n                              aug_proposals[:, -1].contiguous(),\n                              cfg.nms.iou_threshold)\n    scores = merged_proposals[:, 4]\n    _, order = scores.sort(0, descending=True)\n    num = min(cfg.max_per_img, merged_proposals.shape[0])\n    order = order[:num]\n    merged_proposals = merged_proposals[order, :]\n    return merged_proposals\n\n\ndef merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):\n    \"\"\"Merge augmented detection bboxes and scores.\n\n    Args:\n        aug_bboxes (list[Tensor]): shape (n, 4*#class)\n        aug_scores (list[Tensor] or None): shape (n, #class)\n        img_shapes (list[Tensor]): shape (3, ).\n        rcnn_test_cfg (dict): rcnn test config.\n\n    Returns:\n        tuple: (bboxes, scores)\n    \"\"\"\n    recovered_bboxes = []\n    for bboxes, img_info in zip(aug_bboxes, img_metas):\n        img_shape = img_info[0]['img_shape']\n        scale_factor = img_info[0]['scale_factor']\n        flip = img_info[0]['flip']\n        flip_direction = img_info[0]['flip_direction']\n        bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,\n                                   flip_direction)\n        recovered_bboxes.append(bboxes)\n    bboxes = torch.stack(recovered_bboxes).mean(dim=0)\n    if aug_scores is None:\n        return bboxes\n    else:\n        scores = torch.stack(aug_scores).mean(dim=0)\n        return bboxes, scores\n\n\ndef merge_aug_scores(aug_scores):\n    \"\"\"Merge augmented bbox scores.\"\"\"\n    if isinstance(aug_scores[0], torch.Tensor):\n        return torch.mean(torch.stack(aug_scores), dim=0)\n    else:\n        return np.mean(aug_scores, axis=0)\n\n\ndef merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):\n    \"\"\"Merge augmented mask prediction.\n\n    Args:\n        aug_masks (list[ndarray]): shape (n, #class, h, w)\n        img_shapes (list[ndarray]): shape (3, ).\n        rcnn_test_cfg (dict): rcnn test config.\n\n    Returns:\n        tuple: (bboxes, scores)\n    \"\"\"\n    recovered_masks = []\n    for mask, img_info in zip(aug_masks, img_metas):\n        flip = img_info[0]['flip']\n        if flip:\n            flip_direction = img_info[0]['flip_direction']\n            if flip_direction == 'horizontal':\n                mask = mask[:, :, :, ::-1]\n            elif flip_direction == 'vertical':\n                mask = mask[:, :, ::-1, :]\n            elif flip_direction == 'diagonal':\n                mask = mask[:, :, :, ::-1]\n                mask = mask[:, :, ::-1, :]\n            else:\n                raise ValueError(\n                    f\"Invalid flipping direction '{flip_direction}'\")\n        recovered_masks.append(mask)\n\n    if weights is None:\n        merged_masks = 
np.mean(recovered_masks, axis=0)\n    else:\n        merged_masks = np.average(\n            np.array(recovered_masks), axis=0, weights=np.array(weights))\n    return merged_masks\n"
  },
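A tiny sketch of the simplest helper above, `merge_aug_scores`, which just averages per-augmentation scores; the values are toy numbers.

```python
# merge_aug_scores averages scores across augmentations (toy values).
import torch
from mmdet.core.post_processing import merge_aug_scores

aug_scores = [torch.tensor([0.8, 0.2]), torch.tensor([0.6, 0.4])]
print(merge_aug_scores(aug_scores))   # tensor([0.7000, 0.3000])
```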
  {
    "path": "mmdet/core/utils/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads,\n                         reduce_mean, sync_random_seed)\nfrom .misc import (center_of_mass, filter_scores_and_topk, flip_tensor,\n                   generate_coordinate, mask2ndarray, multi_apply,\n                   select_single_mlvl, unmap)\n\n__all__ = [\n    'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply',\n    'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict',\n    'center_of_mass', 'generate_coordinate', 'select_single_mlvl',\n    'filter_scores_and_topk', 'sync_random_seed'\n]\n"
  },
  {
    "path": "mmdet/core/utils/dist_utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport functools\nimport pickle\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import OptimizerHook, get_dist_info\nfrom torch._utils import (_flatten_dense_tensors, _take_tensors,\n                          _unflatten_dense_tensors)\n\n\ndef _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):\n    if bucket_size_mb > 0:\n        bucket_size_bytes = bucket_size_mb * 1024 * 1024\n        buckets = _take_tensors(tensors, bucket_size_bytes)\n    else:\n        buckets = OrderedDict()\n        for tensor in tensors:\n            tp = tensor.type()\n            if tp not in buckets:\n                buckets[tp] = []\n            buckets[tp].append(tensor)\n        buckets = buckets.values()\n\n    for bucket in buckets:\n        flat_tensors = _flatten_dense_tensors(bucket)\n        dist.all_reduce(flat_tensors)\n        flat_tensors.div_(world_size)\n        for tensor, synced in zip(\n                bucket, _unflatten_dense_tensors(flat_tensors, bucket)):\n            tensor.copy_(synced)\n\n\ndef allreduce_grads(params, coalesce=True, bucket_size_mb=-1):\n    \"\"\"Allreduce gradients.\n\n    Args:\n        params (list[torch.Parameters]): List of parameters of a model\n        coalesce (bool, optional): Whether allreduce parameters as a whole.\n            Defaults to True.\n        bucket_size_mb (int, optional): Size of bucket, the unit is MB.\n            Defaults to -1.\n    \"\"\"\n    grads = [\n        param.grad.data for param in params\n        if param.requires_grad and param.grad is not None\n    ]\n    world_size = dist.get_world_size()\n    if coalesce:\n        _allreduce_coalesced(grads, world_size, bucket_size_mb)\n    else:\n        for tensor in grads:\n            dist.all_reduce(tensor.div_(world_size))\n\n\nclass DistOptimizerHook(OptimizerHook):\n    \"\"\"Deprecated optimizer hook for distributed training.\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        warnings.warn('\"DistOptimizerHook\" is deprecated, please switch to'\n                      '\"mmcv.runner.OptimizerHook\".')\n        super().__init__(*args, **kwargs)\n\n\ndef reduce_mean(tensor):\n    \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n    if not (dist.is_available() and dist.is_initialized()):\n        return tensor\n    tensor = tensor.clone()\n    dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n    return tensor\n\n\ndef obj2tensor(pyobj, device='cuda'):\n    \"\"\"Serialize picklable python object to tensor.\"\"\"\n    storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj))\n    return torch.ByteTensor(storage).to(device=device)\n\n\ndef tensor2obj(tensor):\n    \"\"\"Deserialize tensor to picklable python object.\"\"\"\n    return pickle.loads(tensor.cpu().numpy().tobytes())\n\n\n@functools.lru_cache()\ndef _get_global_gloo_group():\n    \"\"\"Return a process group based on gloo backend, containing all the ranks\n    The result is cached.\"\"\"\n    if dist.get_backend() == 'nccl':\n        return dist.new_group(backend='gloo')\n    else:\n        return dist.group.WORLD\n\n\ndef all_reduce_dict(py_dict, op='sum', group=None, to_float=True):\n    \"\"\"Apply all reduce function for python dict object.\n\n    The code is modified from https://github.com/Megvii-\n    BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py.\n\n    NOTE: make sure that py_dict in different 
ranks has the same keys and\n    the values should be in the same shape. Currently only supports\n    nccl backend.\n\n    Args:\n        py_dict (dict): Dict to be applied all reduce op.\n        op (str): Operator, could be 'sum' or 'mean'. Default: 'sum'\n        group (:obj:`torch.distributed.group`, optional): Distributed group,\n            Default: None.\n        to_float (bool): Whether to convert all values of dict to float.\n            Default: True.\n\n    Returns:\n        OrderedDict: reduced python dict object.\n    \"\"\"\n    warnings.warn(\n        'group` is deprecated. Currently only supports NCCL backend.')\n    _, world_size = get_dist_info()\n    if world_size == 1:\n        return py_dict\n\n    # all reduce logic across different devices.\n    py_key = list(py_dict.keys())\n    if not isinstance(py_dict, OrderedDict):\n        py_key_tensor = obj2tensor(py_key)\n        dist.broadcast(py_key_tensor, src=0)\n        py_key = tensor2obj(py_key_tensor)\n\n    tensor_shapes = [py_dict[k].shape for k in py_key]\n    tensor_numels = [py_dict[k].numel() for k in py_key]\n\n    if to_float:\n        warnings.warn('Note: the \"to_float\" is True, you need to '\n                      'ensure that the behavior is reasonable.')\n        flatten_tensor = torch.cat(\n            [py_dict[k].flatten().float() for k in py_key])\n    else:\n        flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key])\n\n    dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM)\n    if op == 'mean':\n        flatten_tensor /= world_size\n\n    split_tensors = [\n        x.reshape(shape) for x, shape in zip(\n            torch.split(flatten_tensor, tensor_numels), tensor_shapes)\n    ]\n    out_dict = {k: v for k, v in zip(py_key, split_tensors)}\n    if isinstance(py_dict, OrderedDict):\n        out_dict = OrderedDict(out_dict)\n    return out_dict\n\n\ndef sync_random_seed(seed=None, device='cuda'):\n    \"\"\"Make sure different ranks share the same seed.\n\n    All workers must call this function, otherwise it will deadlock.\n    This method is generally used in `DistributedSampler`,\n    because the seed should be identical across all processes\n    in the distributed group.\n\n    In distributed sampling, different ranks should sample non-overlapped\n    data in the dataset. Therefore, this function is used to make sure that\n    each rank shuffles the data indices in the same order based\n    on the same seed. Then different ranks could use different indices\n    to select non-overlapped data from the same data list.\n\n    Args:\n        seed (int, Optional): The seed. Default to None.\n        device (str): The device where the seed will be put on.\n            Default to 'cuda'.\n\n    Returns:\n        int: Seed to be used.\n    \"\"\"\n    if seed is None:\n        seed = np.random.randint(2**31)\n    assert isinstance(seed, int)\n\n    rank, world_size = get_dist_info()\n\n    if world_size == 1:\n        return seed\n\n    if rank == 0:\n        random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n    else:\n        random_num = torch.tensor(0, dtype=torch.int32, device=device)\n    dist.broadcast(random_num, src=0)\n    return random_num.item()\n"
  },
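As the code above shows, these helpers degrade gracefully when `torch.distributed` is not initialized, which is convenient for single-GPU runs and unit tests; a minimal sketch under that assumption:

```python
# Sketch: without a distributed process group these helpers are effectively no-ops.
import torch
from mmdet.core.utils import reduce_mean, sync_random_seed

loss = torch.tensor(1.5)
print(reduce_mean(loss))       # returned unchanged when dist is not initialized
print(sync_random_seed(42))    # 42, since world_size == 1
```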
  {
    "path": "mmdet/core/utils/misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom six.moves import map, zip\n\nfrom ..mask.structures import BitmapMasks, PolygonMasks\n\n\ndef multi_apply(func, *args, **kwargs):\n    \"\"\"Apply function to a list of arguments.\n\n    Note:\n        This function applies the ``func`` to multiple inputs and\n        map the multiple outputs of the ``func`` into different\n        list. Each list contains the same type of outputs corresponding\n        to different inputs.\n\n    Args:\n        func (Function): A function that will be applied to a list of\n            arguments\n\n    Returns:\n        tuple(list): A tuple containing multiple list, each list contains \\\n            a kind of returned results by the function\n    \"\"\"\n    pfunc = partial(func, **kwargs) if kwargs else func\n    map_results = map(pfunc, *args)\n    return tuple(map(list, zip(*map_results)))\n\n\ndef unmap(data, count, inds, fill=0):\n    \"\"\"Unmap a subset of item (data) back to the original set of items (of size\n    count)\"\"\"\n    if data.dim() == 1:\n        ret = data.new_full((count, ), fill)\n        ret[inds.type(torch.bool)] = data\n    else:\n        new_size = (count, ) + data.size()[1:]\n        ret = data.new_full(new_size, fill)\n        ret[inds.type(torch.bool), :] = data\n    return ret\n\n\ndef mask2ndarray(mask):\n    \"\"\"Convert Mask to ndarray..\n\n    Args:\n        mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or\n        torch.Tensor or np.ndarray): The mask to be converted.\n\n    Returns:\n        np.ndarray: Ndarray mask of shape (n, h, w) that has been converted\n    \"\"\"\n    if isinstance(mask, (BitmapMasks, PolygonMasks)):\n        mask = mask.to_ndarray()\n    elif isinstance(mask, torch.Tensor):\n        mask = mask.detach().cpu().numpy()\n    elif not isinstance(mask, np.ndarray):\n        raise TypeError(f'Unsupported {type(mask)} data type')\n    return mask\n\n\ndef flip_tensor(src_tensor, flip_direction):\n    \"\"\"flip tensor base on flip_direction.\n\n    Args:\n        src_tensor (Tensor): input feature map, shape (B, C, H, W).\n        flip_direction (str): The flipping direction. Options are\n          'horizontal', 'vertical', 'diagonal'.\n\n    Returns:\n        out_tensor (Tensor): Flipped tensor.\n    \"\"\"\n    assert src_tensor.ndim == 4\n    valid_directions = ['horizontal', 'vertical', 'diagonal']\n    assert flip_direction in valid_directions\n    if flip_direction == 'horizontal':\n        out_tensor = torch.flip(src_tensor, [3])\n    elif flip_direction == 'vertical':\n        out_tensor = torch.flip(src_tensor, [2])\n    else:\n        out_tensor = torch.flip(src_tensor, [2, 3])\n    return out_tensor\n\n\ndef select_single_mlvl(mlvl_tensors, batch_id, detach=True):\n    \"\"\"Extract a multi-scale single image tensor from a multi-scale batch\n    tensor based on batch index.\n\n    Note: The default value of detach is True, because the proposal gradient\n    needs to be detached during the training of the two-stage model. E.g\n    Cascade Mask R-CNN.\n\n    Args:\n        mlvl_tensors (list[Tensor]): Batch tensor for all scale levels,\n           each is a 4D-tensor.\n        batch_id (int): Batch index.\n        detach (bool): Whether detach gradient. 
Default True.\n\n    Returns:\n        list[Tensor]: Multi-scale single image tensor.\n    \"\"\"\n    assert isinstance(mlvl_tensors, (list, tuple))\n    num_levels = len(mlvl_tensors)\n\n    if detach:\n        mlvl_tensor_list = [\n            mlvl_tensors[i][batch_id].detach() for i in range(num_levels)\n        ]\n    else:\n        mlvl_tensor_list = [\n            mlvl_tensors[i][batch_id] for i in range(num_levels)\n        ]\n    return mlvl_tensor_list\n\n\ndef filter_scores_and_topk(scores, score_thr, topk, results=None):\n    \"\"\"Filter results using score threshold and topk candidates.\n\n    Args:\n        scores (Tensor): The scores, shape (num_bboxes, K).\n        score_thr (float): The score filter threshold.\n        topk (int): The number of topk candidates.\n        results (dict or list or Tensor, Optional): The results to\n           which the filtering rule is to be applied. The shape\n           of each item is (num_bboxes, N).\n\n    Returns:\n        tuple: Filtered results\n\n            - scores (Tensor): The scores after being filtered, \\\n                shape (num_bboxes_filtered, ).\n            - labels (Tensor): The class labels, shape \\\n                (num_bboxes_filtered, ).\n            - anchor_idxs (Tensor): The anchor indexes, shape \\\n                (num_bboxes_filtered, ).\n            - filtered_results (dict or list or Tensor, Optional): \\\n                The filtered results. The shape of each item is \\\n                (num_bboxes_filtered, N).\n    \"\"\"\n    valid_mask = scores > score_thr\n    scores = scores[valid_mask]\n    valid_idxs = torch.nonzero(valid_mask)\n\n    num_topk = min(topk, valid_idxs.size(0))\n    # torch.sort is actually faster than .topk (at least on GPUs)\n    scores, idxs = scores.sort(descending=True)\n    scores = scores[:num_topk]\n    topk_idxs = valid_idxs[idxs[:num_topk]]\n    keep_idxs, labels = topk_idxs.unbind(dim=1)\n\n    filtered_results = None\n    if results is not None:\n        if isinstance(results, dict):\n            filtered_results = {k: v[keep_idxs] for k, v in results.items()}\n        elif isinstance(results, list):\n            filtered_results = [result[keep_idxs] for result in results]\n        elif isinstance(results, torch.Tensor):\n            filtered_results = results[keep_idxs]\n        else:\n            raise NotImplementedError(f'Only supports dict or list or Tensor, '\n                                      f'but get {type(results)}.')\n    return scores, labels, keep_idxs, filtered_results\n\n\ndef center_of_mass(mask, esp=1e-6):\n    \"\"\"Calculate the centroid coordinates of the mask.\n\n    Args:\n        mask (Tensor): The mask to be calculated, shape (h, w).\n        esp (float): Avoid dividing by zero. 
Default: 1e-6.\n\n    Returns:\n        tuple[Tensor]: the coordinates of the center point of the mask.\n\n            - center_h (Tensor): the center point of the height.\n            - center_w (Tensor): the center point of the width.\n    \"\"\"\n    h, w = mask.shape\n    grid_h = torch.arange(h, device=mask.device)[:, None]\n    grid_w = torch.arange(w, device=mask.device)\n    normalizer = mask.sum().float().clamp(min=esp)\n    center_h = (mask * grid_h).sum() / normalizer\n    center_w = (mask * grid_w).sum() / normalizer\n    return center_h, center_w\n\n\ndef generate_coordinate(featmap_sizes, device='cuda'):\n    \"\"\"Generate the coordinate.\n\n    Args:\n        featmap_sizes (tuple): The feature to be calculated,\n            of shape (N, C, W, H).\n        device (str): The device where the feature will be put on.\n    Returns:\n        coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H).\n    \"\"\"\n\n    x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device)\n    y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device)\n    y, x = torch.meshgrid(y_range, x_range)\n    y = y.expand([featmap_sizes[0], 1, -1, -1])\n    x = x.expand([featmap_sizes[0], 1, -1, -1])\n    coord_feat = torch.cat([x, y], 1)\n\n    return coord_feat\n"
  },
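A sketch of `filter_scores_and_topk` from the file above on a toy score matrix; the scores and the expected outputs in the comments are invented for illustration.

```python
# Sketch of filter_scores_and_topk on made-up per-class scores.
import torch
from mmdet.core.utils import filter_scores_and_topk

cls_scores = torch.tensor([[0.10, 0.90],
                           [0.70, 0.20],
                           [0.05, 0.03]])   # (num_bboxes, num_classes)
scores, labels, keep_idxs, _ = filter_scores_and_topk(
    cls_scores, score_thr=0.3, topk=2)
# scores: tensor([0.9000, 0.7000]); labels: tensor([1, 0]); keep_idxs: tensor([0, 1])
```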
  {
    "path": "mmdet/core/visualization/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .image import (color_val_matplotlib, imshow_det_bboxes,\n                    imshow_gt_det_bboxes)\nfrom .palette import get_palette, palette_val\n\n__all__ = [\n    'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib',\n    'palette_val', 'get_palette'\n]\n"
  },
  {
    "path": "mmdet/core/visualization/image.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as mask_util\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\n\nfrom mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET\nfrom ..mask.structures import bitmap_to_polygon\nfrom ..utils import mask2ndarray\nfrom .palette import get_palette, palette_val\n\n__all__ = [\n    'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels',\n    'imshow_det_bboxes', 'imshow_gt_det_bboxes'\n]\n\nEPS = 1e-2\n\n\ndef color_val_matplotlib(color):\n    \"\"\"Convert various input in BGR order to normalized RGB matplotlib color\n    tuples.\n\n    Args:\n        color (:obj`Color` | str | tuple | int | ndarray): Color inputs.\n\n    Returns:\n        tuple[float]: A tuple of 3 normalized floats indicating RGB channels.\n    \"\"\"\n    color = mmcv.color_val(color)\n    color = [color / 255 for color in color[::-1]]\n    return tuple(color)\n\n\ndef _get_adaptive_scales(areas, min_area=800, max_area=30000):\n    \"\"\"Get adaptive scales according to areas.\n\n    The scale range is [0.5, 1.0]. When the area is less than\n    ``'min_area'``, the scale is 0.5 while the area is larger than\n    ``'max_area'``, the scale is 1.0.\n\n    Args:\n        areas (ndarray): The areas of bboxes or masks with the\n            shape of (n, ).\n        min_area (int): Lower bound areas for adaptive scales.\n            Default: 800.\n        max_area (int): Upper bound areas for adaptive scales.\n            Default: 30000.\n\n    Returns:\n        ndarray: The adaotive scales with the shape of (n, ).\n    \"\"\"\n    scales = 0.5 + (areas - min_area) / (max_area - min_area)\n    scales = np.clip(scales, 0.5, 1.0)\n    return scales\n\n\ndef _get_bias_color(base, max_dist=30):\n    \"\"\"Get different colors for each masks.\n\n    Get different colors for each masks by adding a bias\n    color to the base category color.\n    Args:\n        base (ndarray): The base category color with the shape\n            of (3, ).\n        max_dist (int): The max distance of bias. Default: 30.\n\n    Returns:\n        ndarray: The new color for a mask with the shape of (3, ).\n    \"\"\"\n    new_color = base + np.random.randint(\n        low=-max_dist, high=max_dist + 1, size=3)\n    return np.clip(new_color, 0, 255, new_color)\n\n\ndef draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):\n    \"\"\"Draw bounding boxes on the axes.\n\n    Args:\n        ax (matplotlib.Axes): The input axes.\n        bboxes (ndarray): The input bounding boxes with the shape\n            of (n, 4).\n        color (list[tuple] | matplotlib.color): the colors for each\n            bounding boxes.\n        alpha (float): Transparency of bounding boxes. Default: 0.8.\n        thickness (int): Thickness of lines. 
Default: 2.\n\n    Returns:\n        matplotlib.Axes: The result axes.\n    \"\"\"\n    polygons = []\n    for i, bbox in enumerate(bboxes):\n        bbox_int = bbox.astype(np.int32)\n        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],\n                [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]\n        np_poly = np.array(poly).reshape((4, 2))\n        polygons.append(Polygon(np_poly))\n    p = PatchCollection(\n        polygons,\n        facecolor='none',\n        edgecolors=color,\n        linewidths=thickness,\n        alpha=alpha)\n    ax.add_collection(p)\n\n    return ax\n\n\ndef draw_labels(ax,\n                labels,\n                positions,\n                scores=None,\n                class_names=None,\n                color='w',\n                font_size=8,\n                scales=None,\n                horizontal_alignment='left'):\n    \"\"\"Draw labels on the axes.\n\n    Args:\n        ax (matplotlib.Axes): The input axes.\n        labels (ndarray): The labels with the shape of (n, ).\n        positions (ndarray): The positions to draw each labels.\n        scores (ndarray): The scores for each labels.\n        class_names (list[str]): The class names.\n        color (list[tuple] | matplotlib.color): The colors for labels.\n        font_size (int): Font size of texts. Default: 8.\n        scales (list[float]): Scales of texts. Default: None.\n        horizontal_alignment (str): The horizontal alignment method of\n            texts. Default: 'left'.\n\n    Returns:\n        matplotlib.Axes: The result axes.\n    \"\"\"\n    for i, (pos, label) in enumerate(zip(positions, labels)):\n        label_text = class_names[\n            label] if class_names is not None else f'class {label}'\n        if scores is not None:\n            label_text += f'|{scores[i]:.02f}'\n        text_color = color[i] if isinstance(color, list) else color\n\n        font_size_mask = font_size if scales is None else font_size * scales[i]\n        ax.text(\n            pos[0],\n            pos[1],\n            f'{label_text}',\n            bbox={\n                'facecolor': 'black',\n                'alpha': 0.8,\n                'pad': 0.7,\n                'edgecolor': 'none'\n            },\n            color=text_color,\n            fontsize=font_size_mask,\n            verticalalignment='top',\n            horizontalalignment=horizontal_alignment)\n\n    return ax\n\n\ndef draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):\n    \"\"\"Draw masks on the image and their edges on the axes.\n\n    Args:\n        ax (matplotlib.Axes): The input axes.\n        img (ndarray): The image with the shape of (3, h, w).\n        masks (ndarray): The masks with the shape of (n, h, w).\n        color (ndarray): The colors for each masks with the shape\n            of (n, 3).\n        with_edge (bool): Whether to draw edges. Default: True.\n        alpha (float): Transparency of bounding boxes. 
Default: 0.8.\n\n    Returns:\n        matplotlib.Axes: The result axes.\n        ndarray: The result image.\n    \"\"\"\n    taken_colors = set([0, 0, 0])\n    if color is None:\n        random_colors = np.random.randint(0, 255, (len(masks), 3))\n        color = [tuple(c) for c in random_colors]\n        color = np.array(color, dtype=np.uint8)\n    polygons = []\n    for i, mask in enumerate(masks):\n        if with_edge:\n            contours, _ = bitmap_to_polygon(mask)\n            polygons += [Polygon(c) for c in contours]\n\n        color_mask = color[i]\n        while tuple(color_mask) in taken_colors:\n            color_mask = _get_bias_color(color_mask)\n        taken_colors.add(tuple(color_mask))\n\n        mask = mask.astype(bool)\n        img[mask] = img[mask] * (1 - alpha) + color_mask * alpha\n\n    p = PatchCollection(\n        polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8)\n    ax.add_collection(p)\n\n    return ax, img\n\n\ndef imshow_det_bboxes(img,\n                      bboxes=None,\n                      labels=None,\n                      segms=None,\n                      class_names=None,\n                      score_thr=0,\n                      bbox_color='green',\n                      text_color='green',\n                      mask_color=None,\n                      thickness=2,\n                      font_size=8,\n                      win_name='',\n                      show=True,\n                      wait_time=0,\n                      out_file=None):\n    \"\"\"Draw bboxes and class labels (with scores) on an image.\n\n    Args:\n        img (str | ndarray): The image to be displayed.\n        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or\n            (n, 5).\n        labels (ndarray): Labels of bboxes.\n        segms (ndarray | None): Masks, shaped (n,h,w) or None.\n        class_names (list[str]): Names of each class.\n        score_thr (float): Minimum score of bboxes to be shown. Default: 0.\n        bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines.\n           If a single color is given, it will be applied to all classes.\n           The tuple of color should be in RGB order. Default: 'green'.\n        text_color (list[tuple] | tuple | str | None): Colors of texts.\n           If a single color is given, it will be applied to all classes.\n           The tuple of color should be in RGB order. Default: 'green'.\n        mask_color (list[tuple] | tuple | str | None, optional): Colors of\n           masks. If a single color is given, it will be applied to all\n           classes. The tuple of color should be in RGB order.\n           Default: None.\n        thickness (int): Thickness of lines. Default: 2.\n        font_size (int): Font size of texts. Default: 8.\n        show (bool): Whether to show the image. Default: True.\n        win_name (str): The window name. Default: ''.\n        wait_time (float): Value of waitKey param. 
Default: 0.\n        out_file (str, optional): The filename to write the image.\n            Default: None.\n\n    Returns:\n        ndarray: The image with bboxes drawn on it.\n    \"\"\"\n    assert bboxes is None or bboxes.ndim == 2, \\\n        f' bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'\n    assert labels.ndim == 1, \\\n        f' labels ndim should be 1, but its ndim is {labels.ndim}.'\n    assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \\\n        f' bboxes.shape[1] should be 4 or 5, but its {bboxes.shape[1]}.'\n    assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \\\n        'labels.shape[0] should not be less than bboxes.shape[0].'\n    assert segms is None or segms.shape[0] == labels.shape[0], \\\n        'segms.shape[0] and labels.shape[0] should have the same length.'\n    assert segms is not None or bboxes is not None, \\\n        'segms and bboxes should not be None at the same time.'\n\n    img = mmcv.imread(img).astype(np.uint8)\n\n    if score_thr > 0:\n        assert bboxes is not None and bboxes.shape[1] == 5\n        scores = bboxes[:, -1]\n        inds = scores > score_thr\n        bboxes = bboxes[inds, :]\n        labels = labels[inds]\n        if segms is not None:\n            segms = segms[inds, ...]\n\n    img = mmcv.bgr2rgb(img)\n    width, height = img.shape[1], img.shape[0]\n    img = np.ascontiguousarray(img)\n\n    fig = plt.figure(win_name, frameon=False)\n    plt.title(win_name)\n    canvas = fig.canvas\n    dpi = fig.get_dpi()\n    # add a small EPS to avoid precision lost due to matplotlib's truncation\n    # (https://github.com/matplotlib/matplotlib/issues/15363)\n    fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)\n\n    # remove white edges by set subplot margin\n    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)\n    ax = plt.gca()\n    ax.axis('off')\n\n    max_label = int(max(labels) if len(labels) > 0 else 0)\n    text_palette = palette_val(get_palette(text_color, max_label + 1))\n    text_colors = [text_palette[label] for label in labels]\n\n    num_bboxes = 0\n    if bboxes is not None:\n        num_bboxes = bboxes.shape[0]\n        bbox_palette = palette_val(get_palette(bbox_color, max_label + 1))\n        colors = [bbox_palette[label] for label in labels[:num_bboxes]]\n        draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness)\n\n        horizontal_alignment = 'left'\n        positions = bboxes[:, :2].astype(np.int32) + thickness\n        areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])\n        scales = _get_adaptive_scales(areas)\n        scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None\n        draw_labels(\n            ax,\n            labels[:num_bboxes],\n            positions,\n            scores=scores,\n            class_names=class_names,\n            color=text_colors,\n            font_size=font_size,\n            scales=scales,\n            horizontal_alignment=horizontal_alignment)\n\n    if segms is not None:\n        mask_palette = get_palette(mask_color, max_label + 1)\n        colors = [mask_palette[label] for label in labels]\n        colors = np.array(colors, dtype=np.uint8)\n        draw_masks(ax, img, segms, colors, with_edge=True)\n\n        if num_bboxes < segms.shape[0]:\n            segms = segms[num_bboxes:]\n            horizontal_alignment = 'center'\n            areas = []\n            positions = []\n            for mask in segms:\n                _, _, stats, centroids = 
cv2.connectedComponentsWithStats(\n                    mask.astype(np.uint8), connectivity=8)\n                largest_id = np.argmax(stats[1:, -1]) + 1\n                positions.append(centroids[largest_id])\n                areas.append(stats[largest_id, -1])\n            areas = np.stack(areas, axis=0)\n            scales = _get_adaptive_scales(areas)\n            draw_labels(\n                ax,\n                labels[num_bboxes:],\n                positions,\n                class_names=class_names,\n                color=text_colors,\n                font_size=font_size,\n                scales=scales,\n                horizontal_alignment=horizontal_alignment)\n\n    plt.imshow(img)\n\n    stream, _ = canvas.print_to_buffer()\n    buffer = np.frombuffer(stream, dtype='uint8')\n    if sys.platform == 'darwin':\n        width, height = canvas.get_width_height(physical=True)\n    img_rgba = buffer.reshape(height, width, 4)\n    rgb, alpha = np.split(img_rgba, [3], axis=2)\n    img = rgb.astype('uint8')\n    img = mmcv.rgb2bgr(img)\n\n    if show:\n        # We do not use cv2 for display because in some cases, opencv will\n        # conflict with Qt, it will output a warning: Current thread\n        # is not the object's thread. You can refer to\n        # https://github.com/opencv/opencv-python/issues/46 for details\n        if wait_time == 0:\n            plt.show()\n        else:\n            plt.show(block=False)\n            plt.pause(wait_time)\n    if out_file is not None:\n        mmcv.imwrite(img, out_file)\n\n    plt.close()\n\n    return img\n\n\ndef imshow_gt_det_bboxes(img,\n                         annotation,\n                         result,\n                         class_names=None,\n                         score_thr=0,\n                         gt_bbox_color=(61, 102, 255),\n                         gt_text_color=(200, 200, 200),\n                         gt_mask_color=(61, 102, 255),\n                         det_bbox_color=(241, 101, 72),\n                         det_text_color=(200, 200, 200),\n                         det_mask_color=(241, 101, 72),\n                         thickness=2,\n                         font_size=13,\n                         win_name='',\n                         show=True,\n                         wait_time=0,\n                         out_file=None,\n                         overlay_gt_pred=True):\n    \"\"\"General visualization GT and result function.\n\n    Args:\n      img (str | ndarray): The image to be displayed.\n      annotation (dict): Ground truth annotations where contain keys of\n          'gt_bboxes' and 'gt_labels' or 'gt_masks'.\n      result (tuple[list] | list): The detection result, can be either\n          (bbox, segm) or just bbox.\n      class_names (list[str]): Names of each classes.\n      score_thr (float): Minimum score of bboxes to be shown. Default: 0.\n      gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines.\n          If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (61, 102, 255).\n      gt_text_color (list[tuple] | tuple | str | None): Colors of texts.\n          If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (200, 200, 200).\n      gt_mask_color (list[tuple] | tuple | str | None, optional): Colors of\n          masks. 
If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (61, 102, 255).\n      det_bbox_color (list[tuple] | tuple | str | None):Colors of bbox lines.\n          If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (241, 101, 72).\n      det_text_color (list[tuple] | tuple | str | None):Colors of texts.\n          If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (200, 200, 200).\n      det_mask_color (list[tuple] | tuple | str | None, optional): Color of\n          masks. If a single color is given, it will be applied to all classes.\n          The tuple of color should be in RGB order. Default: (241, 101, 72).\n      thickness (int): Thickness of lines. Default: 2.\n      font_size (int): Font size of texts. Default: 13.\n      win_name (str): The window name. Default: ''.\n      show (bool): Whether to show the image. Default: True.\n      wait_time (float): Value of waitKey param. Default: 0.\n      out_file (str, optional): The filename to write the image.\n          Default: None.\n      overlay_gt_pred (bool): Whether to plot gts and predictions on the\n       same image. If False, predictions and gts will be plotted on two same\n       image which will be concatenated in vertical direction. The image\n       above is drawn with gt, and the image below is drawn with the\n       prediction result. Default: True.\n\n    Returns:\n        ndarray: The image with bboxes or masks drawn on it.\n    \"\"\"\n    assert 'gt_bboxes' in annotation\n    assert 'gt_labels' in annotation\n    assert isinstance(result, (tuple, list, dict)), 'Expected ' \\\n        f'tuple or list or dict, but get {type(result)}'\n\n    gt_bboxes = annotation['gt_bboxes']\n    gt_labels = annotation['gt_labels']\n    gt_masks = annotation.get('gt_masks', None)\n    if gt_masks is not None:\n        gt_masks = mask2ndarray(gt_masks)\n\n    gt_seg = annotation.get('gt_semantic_seg', None)\n    if gt_seg is not None:\n        pad_value = 255  # the padding value of gt_seg\n        sem_labels = np.unique(gt_seg)\n        all_labels = np.concatenate((gt_labels, sem_labels), axis=0)\n        all_labels, counts = np.unique(all_labels, return_counts=True)\n        stuff_labels = all_labels[np.logical_and(counts < 2,\n                                                 all_labels != pad_value)]\n        stuff_masks = gt_seg[None] == stuff_labels[:, None, None]\n        gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)\n        gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),\n                                  axis=0)\n        # If you need to show the bounding boxes,\n        # please comment the following line\n        # gt_bboxes = None\n\n    img = mmcv.imread(img)\n\n    img_with_gt = imshow_det_bboxes(\n        img,\n        gt_bboxes,\n        gt_labels,\n        gt_masks,\n        class_names=class_names,\n        bbox_color=gt_bbox_color,\n        text_color=gt_text_color,\n        mask_color=gt_mask_color,\n        thickness=thickness,\n        font_size=font_size,\n        win_name=win_name,\n        show=False)\n\n    if not isinstance(result, dict):\n        if isinstance(result, tuple):\n            bbox_result, segm_result = result\n            if isinstance(segm_result, tuple):\n                segm_result = segm_result[0]  # ms rcnn\n        else:\n            
bbox_result, segm_result = result, None\n\n        bboxes = np.vstack(bbox_result)\n        labels = [\n            np.full(bbox.shape[0], i, dtype=np.int32)\n            for i, bbox in enumerate(bbox_result)\n        ]\n        labels = np.concatenate(labels)\n\n        segms = None\n        if segm_result is not None and len(labels) > 0:  # non empty\n            segms = mmcv.concat_list(segm_result)\n            segms = mask_util.decode(segms)\n            segms = segms.transpose(2, 0, 1)\n    else:\n        assert class_names is not None, 'We need to know the number ' \\\n                                        'of classes.'\n        VOID = len(class_names)\n        bboxes = None\n        pan_results = result['pan_results']\n        # keep objects ahead\n        ids = np.unique(pan_results)[::-1]\n        legal_indices = ids != VOID\n        ids = ids[legal_indices]\n        labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n        segms = (pan_results[None] == ids[:, None, None])\n\n    if overlay_gt_pred:\n        img = imshow_det_bboxes(\n            img_with_gt,\n            bboxes,\n            labels,\n            segms=segms,\n            class_names=class_names,\n            score_thr=score_thr,\n            bbox_color=det_bbox_color,\n            text_color=det_text_color,\n            mask_color=det_mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=show,\n            wait_time=wait_time,\n            out_file=out_file)\n    else:\n        img_with_det = imshow_det_bboxes(\n            img,\n            bboxes,\n            labels,\n            segms=segms,\n            class_names=class_names,\n            score_thr=score_thr,\n            bbox_color=det_bbox_color,\n            text_color=det_text_color,\n            mask_color=det_mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=False)\n        img = np.concatenate([img_with_gt, img_with_det], axis=0)\n\n        plt.imshow(img)\n        if show:\n            if wait_time == 0:\n                plt.show()\n            else:\n                plt.show(block=False)\n                plt.pause(wait_time)\n        if out_file is not None:\n            mmcv.imwrite(img, out_file)\n        plt.close()\n\n    return img\n"
  },
  {
    "path": "mmdet/core/visualization/palette.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\n\n\ndef palette_val(palette):\n    \"\"\"Convert palette to matplotlib palette.\n\n    Args:\n        palette List[tuple]: A list of color tuples.\n\n    Returns:\n        List[tuple[float]]: A list of RGB matplotlib color tuples.\n    \"\"\"\n    new_palette = []\n    for color in palette:\n        color = [c / 255 for c in color]\n        new_palette.append(tuple(color))\n    return new_palette\n\n\ndef get_palette(palette, num_classes):\n    \"\"\"Get palette from various inputs.\n\n    Args:\n        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.\n        num_classes (int): the number of classes.\n\n    Returns:\n        list[tuple[int]]: A list of color tuples.\n    \"\"\"\n    assert isinstance(num_classes, int)\n\n    if isinstance(palette, list):\n        dataset_palette = palette\n    elif isinstance(palette, tuple):\n        dataset_palette = [palette] * num_classes\n    elif palette == 'random' or palette is None:\n        state = np.random.get_state()\n        # random color\n        np.random.seed(42)\n        palette = np.random.randint(0, 256, size=(num_classes, 3))\n        np.random.set_state(state)\n        dataset_palette = [tuple(c) for c in palette]\n    elif palette == 'coco':\n        from mmdet.datasets import CocoDataset, CocoPanopticDataset\n        dataset_palette = CocoDataset.PALETTE\n        if len(dataset_palette) < num_classes:\n            dataset_palette = CocoPanopticDataset.PALETTE\n    elif palette == 'citys':\n        from mmdet.datasets import CityscapesDataset\n        dataset_palette = CityscapesDataset.PALETTE\n    elif palette == 'voc':\n        from mmdet.datasets import VOCDataset\n        dataset_palette = VOCDataset.PALETTE\n    elif mmcv.is_str(palette):\n        dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes\n    else:\n        raise TypeError(f'Invalid type for palette: {type(palette)}')\n\n    assert len(dataset_palette) >= num_classes, \\\n        'The length of palette should not be less than `num_classes`.'\n    return dataset_palette\n"
  },
  {
    "path": "mmdet/datasets/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import DATASETS, PIPELINES, build_dataloader, build_dataset\nfrom .cityscapes import CityscapesDataset\nfrom .coco import CocoDataset\nfrom .coco_occluded import OccludedSeparatedCocoDataset\nfrom .coco_panoptic import CocoPanopticDataset\nfrom .custom import CustomDataset\nfrom .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,\n                               MultiImageMixDataset, RepeatDataset)\nfrom .deepfashion import DeepFashionDataset\nfrom .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset\nfrom .objects365 import Objects365V1Dataset, Objects365V2Dataset\nfrom .openimages import OpenImagesChallengeDataset, OpenImagesDataset\nfrom .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler\nfrom .utils import (NumClassCheckHook, get_loading_pipeline,\n                    replace_ImageToTensor)\nfrom .voc import VOCDataset\nfrom .wider_face import WIDERFaceDataset\nfrom .xml_style import XMLDataset\n\n__all__ = [\n    'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset',\n    'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset',\n    'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler',\n    'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset',\n    'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES',\n    'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline',\n    'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset',\n    'OpenImagesDataset', 'OpenImagesChallengeDataset', 'Objects365V1Dataset',\n    'Objects365V2Dataset', 'OccludedSeparatedCocoDataset'\n]\n"
  },
  {
    "path": "mmdet/datasets/api_wrappers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coco_api import COCO, COCOeval\nfrom .panoptic_evaluation import pq_compute_multi_core, pq_compute_single_core\n\n__all__ = [\n    'COCO', 'COCOeval', 'pq_compute_multi_core', 'pq_compute_single_core'\n]\n"
  },
  {
    "path": "mmdet/datasets/api_wrappers/coco_api.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# This file add snake case alias for coco api\n\nimport warnings\n\nimport pycocotools\nfrom pycocotools.coco import COCO as _COCO\nfrom pycocotools.cocoeval import COCOeval as _COCOeval\n\n\nclass COCO(_COCO):\n    \"\"\"This class is almost the same as official pycocotools package.\n\n    It implements some snake case function aliases. So that the COCO class has\n    the same interface as LVIS class.\n    \"\"\"\n\n    def __init__(self, annotation_file=None):\n        if getattr(pycocotools, '__version__', '0') >= '12.0.2':\n            warnings.warn(\n                'mmpycocotools is deprecated. Please install official pycocotools by \"pip install pycocotools\"',  # noqa: E501\n                UserWarning)\n        super().__init__(annotation_file=annotation_file)\n        self.img_ann_map = self.imgToAnns\n        self.cat_img_map = self.catToImgs\n\n    def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):\n        return self.getAnnIds(img_ids, cat_ids, area_rng, iscrowd)\n\n    def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):\n        return self.getCatIds(cat_names, sup_names, cat_ids)\n\n    def get_img_ids(self, img_ids=[], cat_ids=[]):\n        return self.getImgIds(img_ids, cat_ids)\n\n    def load_anns(self, ids):\n        return self.loadAnns(ids)\n\n    def load_cats(self, ids):\n        return self.loadCats(ids)\n\n    def load_imgs(self, ids):\n        return self.loadImgs(ids)\n\n\n# just for the ease of import\nCOCOeval = _COCOeval\n"
  },
  {
    "path": "mmdet/datasets/api_wrappers/panoptic_evaluation.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# Copyright (c) 2018, Alexander Kirillov\n# This file supports `file_client` for `panopticapi`,\n# the source code is copied from `panopticapi`,\n# only the way to load the gt images is modified.\nimport multiprocessing\nimport os\n\nimport mmcv\nimport numpy as np\n\ntry:\n    from panopticapi.evaluation import OFFSET, VOID, PQStat\n    from panopticapi.utils import rgb2id\nexcept ImportError:\n    PQStat = None\n    rgb2id = None\n    VOID = 0\n    OFFSET = 256 * 256 * 256\n\n\ndef pq_compute_single_core(proc_id,\n                           annotation_set,\n                           gt_folder,\n                           pred_folder,\n                           categories,\n                           file_client=None,\n                           print_log=False):\n    \"\"\"The single core function to evaluate the metric of Panoptic\n    Segmentation.\n\n    Same as the function with the same name in `panopticapi`. Only the function\n    to load the images is changed to use the file client.\n\n    Args:\n        proc_id (int): The id of the mini process.\n        gt_folder (str): The path of the ground truth images.\n        pred_folder (str): The path of the prediction images.\n        categories (str): The categories of the dataset.\n        file_client (object): The file client of the dataset. If None,\n            the backend will be set to `disk`.\n        print_log (bool): Whether to print the log. Defaults to False.\n    \"\"\"\n    if PQStat is None:\n        raise RuntimeError(\n            'panopticapi is not installed, please install it by: '\n            'pip install git+https://github.com/cocodataset/'\n            'panopticapi.git.')\n\n    if file_client is None:\n        file_client_args = dict(backend='disk')\n        file_client = mmcv.FileClient(**file_client_args)\n\n    pq_stat = PQStat()\n\n    idx = 0\n    for gt_ann, pred_ann in annotation_set:\n        if print_log and idx % 100 == 0:\n            print('Core: {}, {} from {} images processed'.format(\n                proc_id, idx, len(annotation_set)))\n        idx += 1\n        # The gt images can be on the local disk or `ceph`, so we use\n        # file_client here.\n        img_bytes = file_client.get(\n            os.path.join(gt_folder, gt_ann['file_name']))\n        pan_gt = mmcv.imfrombytes(img_bytes, flag='color', channel_order='rgb')\n        pan_gt = rgb2id(pan_gt)\n\n        # The predictions can only be on the local dist now.\n        pan_pred = mmcv.imread(\n            os.path.join(pred_folder, pred_ann['file_name']),\n            flag='color',\n            channel_order='rgb')\n        pan_pred = rgb2id(pan_pred)\n\n        gt_segms = {el['id']: el for el in gt_ann['segments_info']}\n        pred_segms = {el['id']: el for el in pred_ann['segments_info']}\n\n        # predicted segments area calculation + prediction sanity checks\n        pred_labels_set = set(el['id'] for el in pred_ann['segments_info'])\n        labels, labels_cnt = np.unique(pan_pred, return_counts=True)\n        for label, label_cnt in zip(labels, labels_cnt):\n            if label not in pred_segms:\n                if label == VOID:\n                    continue\n                raise KeyError(\n                    'In the image with ID {} segment with ID {} is '\n                    'presented in PNG and not presented in JSON.'.format(\n                        gt_ann['image_id'], label))\n            pred_segms[label]['area'] = label_cnt\n            
pred_labels_set.remove(label)\n            if pred_segms[label]['category_id'] not in categories:\n                raise KeyError(\n                    'In the image with ID {} segment with ID {} is '\n                    'unknown category_id {}.'.format(\n                        gt_ann['image_id'], label,\n                        pred_segms[label]['category_id']))\n        if len(pred_labels_set) != 0:\n            raise KeyError(\n                'In the image with ID {} the following segment IDs {} '\n                'are presented in JSON and not presented in PNG.'.format(\n                    gt_ann['image_id'], list(pred_labels_set)))\n\n        # confusion matrix calculation\n        pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(\n            np.uint64)\n        gt_pred_map = {}\n        labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)\n        for label, intersection in zip(labels, labels_cnt):\n            gt_id = label // OFFSET\n            pred_id = label % OFFSET\n            gt_pred_map[(gt_id, pred_id)] = intersection\n\n        # count all matched pairs\n        gt_matched = set()\n        pred_matched = set()\n        for label_tuple, intersection in gt_pred_map.items():\n            gt_label, pred_label = label_tuple\n            if gt_label not in gt_segms:\n                continue\n            if pred_label not in pred_segms:\n                continue\n            if gt_segms[gt_label]['iscrowd'] == 1:\n                continue\n            if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][\n                    'category_id']:\n                continue\n\n            union = pred_segms[pred_label]['area'] + gt_segms[gt_label][\n                'area'] - intersection - gt_pred_map.get((VOID, pred_label), 0)\n            iou = intersection / union\n            if iou > 0.5:\n                pq_stat[gt_segms[gt_label]['category_id']].tp += 1\n                pq_stat[gt_segms[gt_label]['category_id']].iou += iou\n                gt_matched.add(gt_label)\n                pred_matched.add(pred_label)\n\n        # count false negatives\n        crowd_labels_dict = {}\n        for gt_label, gt_info in gt_segms.items():\n            if gt_label in gt_matched:\n                continue\n            # crowd segments are ignored\n            if gt_info['iscrowd'] == 1:\n                crowd_labels_dict[gt_info['category_id']] = gt_label\n                continue\n            pq_stat[gt_info['category_id']].fn += 1\n\n        # count false positives\n        for pred_label, pred_info in pred_segms.items():\n            if pred_label in pred_matched:\n                continue\n            # intersection of the segment with VOID\n            intersection = gt_pred_map.get((VOID, pred_label), 0)\n            # plus intersection with corresponding CROWD region if it exists\n            if pred_info['category_id'] in crowd_labels_dict:\n                intersection += gt_pred_map.get(\n                    (crowd_labels_dict[pred_info['category_id']], pred_label),\n                    0)\n            # predicted segment is ignored if more than half of\n            # the segment corresponds to VOID and CROWD regions\n            if intersection / pred_info['area'] > 0.5:\n                continue\n            pq_stat[pred_info['category_id']].fp += 1\n\n    if print_log:\n        print('Core: {}, all {} images processed'.format(\n            proc_id, len(annotation_set)))\n    return pq_stat\n\n\ndef 
pq_compute_multi_core(matched_annotations_list,\n                          gt_folder,\n                          pred_folder,\n                          categories,\n                          file_client=None,\n                          nproc=32):\n    \"\"\"Evaluate the metrics of Panoptic Segmentation with multithreading.\n\n    Same as the function with the same name in `panopticapi`.\n\n    Args:\n        matched_annotations_list (list): The matched annotation list. Each\n            element is a tuple of annotations of the same image with the\n            format (gt_anns, pred_anns).\n        gt_folder (str): The path of the ground truth images.\n        pred_folder (str): The path of the prediction images.\n        categories (str): The categories of the dataset.\n        file_client (object): The file client of the dataset. If None,\n            the backend will be set to `disk`.\n        nproc (int): Number of processes for panoptic quality computing.\n            Defaults to 32. When `nproc` exceeds the number of cpu cores,\n            the number of cpu cores is used.\n    \"\"\"\n    if PQStat is None:\n        raise RuntimeError(\n            'panopticapi is not installed, please install it by: '\n            'pip install git+https://github.com/cocodataset/'\n            'panopticapi.git.')\n\n    if file_client is None:\n        file_client_args = dict(backend='disk')\n        file_client = mmcv.FileClient(**file_client_args)\n\n    cpu_num = min(nproc, multiprocessing.cpu_count())\n\n    annotations_split = np.array_split(matched_annotations_list, cpu_num)\n    print('Number of cores: {}, images per core: {}'.format(\n        cpu_num, len(annotations_split[0])))\n    workers = multiprocessing.Pool(processes=cpu_num)\n    processes = []\n    for proc_id, annotation_set in enumerate(annotations_split):\n        p = workers.apply_async(pq_compute_single_core,\n                                (proc_id, annotation_set, gt_folder,\n                                 pred_folder, categories, file_client))\n        processes.append(p)\n\n    # Close the process pool, otherwise it will lead to memory\n    # leaking problems.\n    workers.close()\n    workers.join()\n\n    pq_stat = PQStat()\n    for p in processes:\n        pq_stat += p.get()\n\n    return pq_stat\n"
  },
  {
    "path": "mmdet/datasets/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport platform\nimport random\nimport warnings\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom mmcv.parallel import collate\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import TORCH_VERSION, Registry, build_from_cfg, digit_version\nfrom torch.utils.data import DataLoader\n\nfrom .samplers import (ClassAwareSampler, DistributedGroupSampler,\n                       DistributedSampler, GroupSampler, InfiniteBatchSampler,\n                       InfiniteGroupBatchSampler)\n\nif platform.system() != 'Windows':\n    # https://github.com/pytorch/pytorch/issues/973\n    import resource\n    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)\n    base_soft_limit = rlimit[0]\n    hard_limit = rlimit[1]\n    soft_limit = min(max(4096, base_soft_limit), hard_limit)\n    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))\n\nDATASETS = Registry('dataset')\nPIPELINES = Registry('pipeline')\n\n\ndef _concat_dataset(cfg, default_args=None):\n    from .dataset_wrappers import ConcatDataset\n    ann_files = cfg['ann_file']\n    img_prefixes = cfg.get('img_prefix', None)\n    seg_prefixes = cfg.get('seg_prefix', None)\n    proposal_files = cfg.get('proposal_file', None)\n    separate_eval = cfg.get('separate_eval', True)\n\n    datasets = []\n    num_dset = len(ann_files)\n    for i in range(num_dset):\n        data_cfg = copy.deepcopy(cfg)\n        # pop 'separate_eval' since it is not a valid key for common datasets.\n        if 'separate_eval' in data_cfg:\n            data_cfg.pop('separate_eval')\n        data_cfg['ann_file'] = ann_files[i]\n        if isinstance(img_prefixes, (list, tuple)):\n            data_cfg['img_prefix'] = img_prefixes[i]\n        if isinstance(seg_prefixes, (list, tuple)):\n            data_cfg['seg_prefix'] = seg_prefixes[i]\n        if isinstance(proposal_files, (list, tuple)):\n            data_cfg['proposal_file'] = proposal_files[i]\n        datasets.append(build_dataset(data_cfg, default_args))\n\n    return ConcatDataset(datasets, separate_eval)\n\n\ndef build_dataset(cfg, default_args=None):\n    from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset,\n                                   MultiImageMixDataset, RepeatDataset)\n    if isinstance(cfg, (list, tuple)):\n        dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])\n    elif cfg['type'] == 'ConcatDataset':\n        dataset = ConcatDataset(\n            [build_dataset(c, default_args) for c in cfg['datasets']],\n            cfg.get('separate_eval', True))\n    elif cfg['type'] == 'RepeatDataset':\n        dataset = RepeatDataset(\n            build_dataset(cfg['dataset'], default_args), cfg['times'])\n    elif cfg['type'] == 'ClassBalancedDataset':\n        dataset = ClassBalancedDataset(\n            build_dataset(cfg['dataset'], default_args), cfg['oversample_thr'])\n    elif cfg['type'] == 'MultiImageMixDataset':\n        cp_cfg = copy.deepcopy(cfg)\n        cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])\n        cp_cfg.pop('type')\n        dataset = MultiImageMixDataset(**cp_cfg)\n    elif isinstance(cfg.get('ann_file'), (list, tuple)):\n        dataset = _concat_dataset(cfg, default_args)\n    else:\n        dataset = build_from_cfg(cfg, DATASETS, default_args)\n\n    return dataset\n\n\ndef build_dataloader(dataset,\n                     samples_per_gpu,\n                     workers_per_gpu,\n                     num_gpus=1,\n                
     dist=True,\n                     shuffle=True,\n                     seed=None,\n                     runner_type='EpochBasedRunner',\n                     persistent_workers=False,\n                     class_aware_sampler=None,\n                     **kwargs):\n    \"\"\"Build PyTorch DataLoader.\n\n    In distributed training, each GPU/process has a dataloader.\n    In non-distributed training, there is only one dataloader for all GPUs.\n\n    Args:\n        dataset (Dataset): A PyTorch dataset.\n        samples_per_gpu (int): Number of training samples on each GPU, i.e.,\n            batch size of each GPU.\n        workers_per_gpu (int): How many subprocesses to use for data loading\n            for each GPU.\n        num_gpus (int): Number of GPUs. Only used in non-distributed training.\n        dist (bool): Distributed training/test or not. Default: True.\n        shuffle (bool): Whether to shuffle the data at every epoch.\n            Default: True.\n        seed (int, Optional): Seed to be used. Default: None.\n        runner_type (str): Type of runner. Default: `EpochBasedRunner`\n        persistent_workers (bool): If True, the data loader will not shutdown\n            the worker processes after a dataset has been consumed once.\n            This allows to maintain the workers `Dataset` instances alive.\n            This argument is only valid when PyTorch>=1.7.0. Default: False.\n        class_aware_sampler (dict): Whether to use `ClassAwareSampler`\n            during training. Default: None.\n        kwargs: any keyword argument to be used to initialize DataLoader\n\n    Returns:\n        DataLoader: A PyTorch dataloader.\n    \"\"\"\n    rank, world_size = get_dist_info()\n\n    if dist:\n        # When model is :obj:`DistributedDataParallel`,\n        # `batch_size` of :obj:`dataloader` is the\n        # number of training samples on each GPU.\n        batch_size = samples_per_gpu\n        num_workers = workers_per_gpu\n    else:\n        # When model is obj:`DataParallel`\n        # the batch size is samples on all the GPUS\n        batch_size = num_gpus * samples_per_gpu\n        num_workers = num_gpus * workers_per_gpu\n\n    if runner_type == 'IterBasedRunner':\n        # this is a batch sampler, which can yield\n        # a mini-batch indices each time.\n        # it can be used in both `DataParallel` and\n        # `DistributedDataParallel`\n        if shuffle:\n            batch_sampler = InfiniteGroupBatchSampler(\n                dataset, batch_size, world_size, rank, seed=seed)\n        else:\n            batch_sampler = InfiniteBatchSampler(\n                dataset,\n                batch_size,\n                world_size,\n                rank,\n                seed=seed,\n                shuffle=False)\n        batch_size = 1\n        sampler = None\n    else:\n        if class_aware_sampler is not None:\n            # ClassAwareSampler can be used in both distributed and\n            # non-distributed training.\n            num_sample_class = class_aware_sampler.get('num_sample_class', 1)\n            sampler = ClassAwareSampler(\n                dataset,\n                samples_per_gpu,\n                world_size,\n                rank,\n                seed=seed,\n                num_sample_class=num_sample_class)\n        elif dist:\n            # DistributedGroupSampler will definitely shuffle the data to\n            # satisfy that images on each GPU are in the same group\n            if shuffle:\n                sampler = 
DistributedGroupSampler(\n                    dataset, samples_per_gpu, world_size, rank, seed=seed)\n            else:\n                sampler = DistributedSampler(\n                    dataset, world_size, rank, shuffle=False, seed=seed)\n        else:\n            sampler = GroupSampler(dataset,\n                                   samples_per_gpu) if shuffle else None\n        batch_sampler = None\n\n    init_fn = partial(\n        worker_init_fn, num_workers=num_workers, rank=rank,\n        seed=seed) if seed is not None else None\n\n    if (TORCH_VERSION != 'parrots'\n            and digit_version(TORCH_VERSION) >= digit_version('1.7.0')):\n        kwargs['persistent_workers'] = persistent_workers\n    elif persistent_workers is True:\n        warnings.warn('persistent_workers is invalid because your pytorch '\n                      'version is lower than 1.7.0')\n\n    data_loader = DataLoader(\n        dataset,\n        batch_size=batch_size,\n        sampler=sampler,\n        num_workers=num_workers,\n        batch_sampler=batch_sampler,\n        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),\n        pin_memory=kwargs.pop('pin_memory', False),\n        worker_init_fn=init_fn,\n        **kwargs)\n\n    return data_loader\n\n\ndef worker_init_fn(worker_id, num_workers, rank, seed):\n    # The seed of each worker equals to\n    # num_worker * rank + worker_id + user_seed\n    worker_seed = num_workers * rank + worker_id + seed\n    np.random.seed(worker_seed)\n    random.seed(worker_seed)\n    torch.manual_seed(worker_seed)\n"
  },
  {
    "path": "mmdet/datasets/cityscapes.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa\n# and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa\n\nimport glob\nimport os\nimport os.path as osp\nimport tempfile\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\nfrom mmcv.utils import print_log\n\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass CityscapesDataset(CocoDataset):\n\n    CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle',\n               'bicycle')\n\n    PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70),\n               (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)]\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small or without ground truths.\"\"\"\n        valid_inds = []\n        # obtain images that contain annotation\n        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n        # obtain images that contain annotations of the required categories\n        ids_in_cat = set()\n        for i, class_id in enumerate(self.cat_ids):\n            ids_in_cat |= set(self.coco.cat_img_map[class_id])\n        # merge the image id sets of the two conditions and use the merged set\n        # to filter out images if self.filter_empty_gt=True\n        ids_in_cat &= ids_with_ann\n\n        valid_img_ids = []\n        for i, img_info in enumerate(self.data_infos):\n            img_id = img_info['id']\n            ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n            ann_info = self.coco.loadAnns(ann_ids)\n            all_iscrowd = all([_['iscrowd'] for _ in ann_info])\n            if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat\n                                         or all_iscrowd):\n                continue\n            if min(img_info['width'], img_info['height']) >= min_size:\n                valid_inds.append(i)\n                valid_img_ids.append(img_id)\n        self.img_ids = valid_img_ids\n        return valid_inds\n\n    def _parse_ann_info(self, img_info, ann_info):\n        \"\"\"Parse bbox and mask annotation.\n\n        Args:\n            img_info (dict): Image info of an image.\n            ann_info (list[dict]): Annotation info of an image.\n\n        Returns:\n            dict: A dict containing the following keys: bboxes, \\\n                bboxes_ignore, labels, masks, seg_map. 
\\\n                \"masks\" are already decoded into binary masks.\n        \"\"\"\n        gt_bboxes = []\n        gt_labels = []\n        gt_bboxes_ignore = []\n        gt_masks_ann = []\n\n        for i, ann in enumerate(ann_info):\n            if ann.get('ignore', False):\n                continue\n            x1, y1, w, h = ann['bbox']\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            if ann['category_id'] not in self.cat_ids:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n            if ann.get('iscrowd', False):\n                gt_bboxes_ignore.append(bbox)\n            else:\n                gt_bboxes.append(bbox)\n                gt_labels.append(self.cat2label[ann['category_id']])\n                gt_masks_ann.append(ann['segmentation'])\n\n        if gt_bboxes:\n            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n            gt_labels = np.array(gt_labels, dtype=np.int64)\n        else:\n            gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n            gt_labels = np.array([], dtype=np.int64)\n\n        if gt_bboxes_ignore:\n            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n        else:\n            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n        ann = dict(\n            bboxes=gt_bboxes,\n            labels=gt_labels,\n            bboxes_ignore=gt_bboxes_ignore,\n            masks=gt_masks_ann,\n            seg_map=img_info['segm_file'])\n\n        return ann\n\n    def results2txt(self, results, outfile_prefix):\n        \"\"\"Dump the detection results to a txt file.\n\n        Args:\n            results (list[list | tuple]): Testing results of the\n                dataset.\n            outfile_prefix (str): The filename prefix of the txt files.\n                If the prefix is \"somepath/xxx\",\n                the txt files will be named \"somepath/xxx.txt\".\n\n        Returns:\n            list[str]: Result txt files which contain corresponding \\\n                instance segmentation images.\n        \"\"\"\n        try:\n            import cityscapesscripts.helpers.labels as CSLabels\n        except ImportError:\n            raise ImportError('Please run \"pip install cityscapesscripts\" to '\n                              'install cityscapesscripts first.')\n        result_files = []\n        os.makedirs(outfile_prefix, exist_ok=True)\n        prog_bar = mmcv.ProgressBar(len(self))\n        for idx in range(len(self)):\n            result = results[idx]\n            filename = self.data_infos[idx]['filename']\n            basename = osp.splitext(osp.basename(filename))[0]\n            pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')\n\n            bbox_result, segm_result = result\n            bboxes = np.vstack(bbox_result)\n            # segm results\n            if isinstance(segm_result, tuple):\n                # Some detectors use different scores for bbox and mask,\n                # like Mask Scoring R-CNN. 
Score of segm will be used instead\n                # of bbox score.\n                segms = mmcv.concat_list(segm_result[0])\n                mask_score = segm_result[1]\n            else:\n                # use bbox score for mask score\n                segms = mmcv.concat_list(segm_result)\n                mask_score = [bbox[-1] for bbox in bboxes]\n            labels = [\n                np.full(bbox.shape[0], i, dtype=np.int32)\n                for i, bbox in enumerate(bbox_result)\n            ]\n            labels = np.concatenate(labels)\n\n            assert len(bboxes) == len(segms) == len(labels)\n            num_instances = len(bboxes)\n            prog_bar.update()\n            with open(pred_txt, 'w') as fout:\n                for i in range(num_instances):\n                    pred_class = labels[i]\n                    classes = self.CLASSES[pred_class]\n                    class_id = CSLabels.name2label[classes].id\n                    score = mask_score[i]\n                    mask = maskUtils.decode(segms[i]).astype(np.uint8)\n                    png_filename = osp.join(outfile_prefix,\n                                            basename + f'_{i}_{classes}.png')\n                    mmcv.imwrite(mask, png_filename)\n                    fout.write(f'{osp.basename(png_filename)} {class_id} '\n                               f'{score}\\n')\n            result_files.append(pred_txt)\n\n        return result_files\n\n    def format_results(self, results, txtfile_prefix=None):\n        \"\"\"Format the results to txt (standard format for Cityscapes\n        evaluation).\n\n        Args:\n            results (list): Testing results of the dataset.\n            txtfile_prefix (str | None): The prefix of txt files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. Default: None.\n\n        Returns:\n            tuple: (result_files, tmp_dir), result_files is a dict containing \\\n                the json filepaths, tmp_dir is the temporal directory created \\\n                for saving txt/png files when txtfile_prefix is not specified.\n        \"\"\"\n        assert isinstance(results, list), 'results must be a list'\n        assert len(results) == len(self), (\n            'The length of results is not equal to the dataset len: {} != {}'.\n            format(len(results), len(self)))\n\n        assert isinstance(results, list), 'results must be a list'\n        assert len(results) == len(self), (\n            'The length of results is not equal to the dataset len: {} != {}'.\n            format(len(results), len(self)))\n\n        if txtfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            txtfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            tmp_dir = None\n        result_files = self.results2txt(results, txtfile_prefix)\n\n        return result_files, tmp_dir\n\n    def evaluate(self,\n                 results,\n                 metric='bbox',\n                 logger=None,\n                 outfile_prefix=None,\n                 classwise=False,\n                 proposal_nums=(100, 300, 1000),\n                 iou_thrs=np.arange(0.5, 0.96, 0.05)):\n        \"\"\"Evaluation in Cityscapes/COCO protocol.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. 
Options are\n                'bbox', 'segm', 'proposal', 'proposal_fast'.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            outfile_prefix (str | None): The prefix of output file. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If results are evaluated with COCO protocol, it would be the\n                prefix of output json file. For example, the metric is 'bbox'\n                and 'segm', then json files would be \"a/b/prefix.bbox.json\" and\n                \"a/b/prefix.segm.json\".\n                If results are evaluated with cityscapes protocol, it would be\n                the prefix of output txt/png files. The output files would be\n                png images under folder \"a/b/prefix/xxx/\" and the file name of\n                images would be written into a txt file\n                \"a/b/prefix/xxx_pred.txt\", where \"xxx\" is the video name of\n                cityscapes. If not specified, a temp file will be created.\n                Default: None.\n            classwise (bool): Whether to evaluating the AP for each class.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thrs (Sequence[float]): IoU threshold used for evaluating\n                recalls. If set to a list, the average recall of all IoUs will\n                also be computed. Default: 0.5.\n\n        Returns:\n            dict[str, float]: COCO style evaluation metric or cityscapes mAP \\\n                and AP@50.\n        \"\"\"\n        eval_results = dict()\n\n        metrics = metric.copy() if isinstance(metric, list) else [metric]\n\n        if 'cityscapes' in metrics:\n            eval_results.update(\n                self._evaluate_cityscapes(results, outfile_prefix, logger))\n            metrics.remove('cityscapes')\n\n        # left metrics are all coco metric\n        if len(metrics) > 0:\n            # create CocoDataset with CityscapesDataset annotation\n            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,\n                                    None, self.data_root, self.img_prefix,\n                                    self.seg_prefix, self.seg_suffix,\n                                    self.proposal_file, self.test_mode,\n                                    self.filter_empty_gt)\n            # TODO: remove this in the future\n            # reload annotations of correct class\n            self_coco.CLASSES = self.CLASSES\n            self_coco.data_infos = self_coco.load_annotations(self.ann_file)\n            eval_results.update(\n                self_coco.evaluate(results, metrics, logger, outfile_prefix,\n                                   classwise, proposal_nums, iou_thrs))\n\n        return eval_results\n\n    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):\n        \"\"\"Evaluation in Cityscapes protocol.\n\n        Args:\n            results (list): Testing results of the dataset.\n            txtfile_prefix (str | None): The prefix of output txt file\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. 
Default: None.\n\n        Returns:\n            dict[str, float]: Cityscapes evaluation results, containing 'mAP' \\\n                and 'AP@50'.\n        \"\"\"\n\n        try:\n            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval  # noqa\n        except ImportError:\n            raise ImportError('Please run \"pip install cityscapesscripts\" to '\n                              'install cityscapesscripts first.')\n        msg = 'Evaluating in Cityscapes style'\n        if logger is None:\n            msg = '\\n' + msg\n        print_log(msg, logger=logger)\n\n        result_files, tmp_dir = self.format_results(results, txtfile_prefix)\n\n        if tmp_dir is None:\n            result_dir = osp.join(txtfile_prefix, 'results')\n        else:\n            result_dir = osp.join(tmp_dir.name, 'results')\n\n        eval_results = OrderedDict()\n        print_log(f'Evaluating results under {result_dir} ...', logger=logger)\n\n        # set global states in cityscapes evaluation API\n        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')\n        CSEval.args.predictionPath = os.path.abspath(result_dir)\n        CSEval.args.predictionWalk = None\n        CSEval.args.JSONOutput = False\n        CSEval.args.colorized = False\n        CSEval.args.gtInstancesFile = os.path.join(result_dir,\n                                                   'gtInstances.json')\n        CSEval.args.groundTruthSearch = os.path.join(\n            self.img_prefix.replace('leftImg8bit', 'gtFine'),\n            '*/*_gtFine_instanceIds.png')\n\n        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)\n        assert len(groundTruthImgList), 'Cannot find ground truth images' \\\n            f' in {CSEval.args.groundTruthSearch}.'\n        predictionImgList = []\n        for gt in groundTruthImgList:\n            predictionImgList.append(CSEval.getPrediction(gt, CSEval.args))\n        CSEval_results = CSEval.evaluateImgLists(predictionImgList,\n                                                 groundTruthImgList,\n                                                 CSEval.args)['averages']\n\n        eval_results['mAP'] = CSEval_results['allAp']\n        eval_results['AP@50'] = CSEval_results['allAp50%']\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n"
  },
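A minimal usage sketch for the `CityscapesDataset.evaluate` entry point in the file above: the 'cityscapes' metric is handled by `_evaluate_cityscapes`, and any remaining COCO-style metrics are delegated to a temporary `CocoDataset`. The dataset config below is illustrative only; the annotation/image paths and the empty pipeline are placeholders, and `results` is assumed to come from mmdet's standard test loop (e.g., `single_gpu_test`).

# Hypothetical example, not part of the repository's code.
from mmdet.datasets import build_dataset

dataset = build_dataset(
    dict(
        type='CityscapesDataset',
        ann_file='data/cityscapes/annotations/instancesonly_filtered_gtFine_val.json',  # placeholder path
        img_prefix='data/cityscapes/leftImg8bit/val/',  # placeholder path
        seg_prefix='data/cityscapes/gtFine/val/',  # placeholder path
        pipeline=[]))  # placeholder pipeline

# `results` is the per-image output of single_gpu_test/multi_gpu_test.
eval_metrics = dataset.evaluate(
    results,
    metric=['bbox', 'cityscapes'],
    outfile_prefix='work_dirs/cityscapes_eval/prefix',
    classwise=True)
print(eval_metrics)  # e.g. {'mAP': ..., 'AP@50': ..., 'bbox_mAP': ...}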
  {
    "path": "mmdet/datasets/coco.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport contextlib\nimport io\nimport itertools\nimport logging\nimport os.path as osp\nimport tempfile\nimport warnings\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom mmdet.core import eval_recalls\nfrom .api_wrappers import COCO, COCOeval\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\n@DATASETS.register_module()\nclass CocoDataset(CustomDataset):\n\n    CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',\n               'train', 'truck', 'boat', 'traffic light', 'fire hydrant',\n               'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog',\n               'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',\n               'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',\n               'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat',\n               'baseball glove', 'skateboard', 'surfboard', 'tennis racket',\n               'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',\n               'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',\n               'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',\n               'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop',\n               'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n               'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',\n               'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')\n\n    PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230),\n               (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70),\n               (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0),\n               (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255),\n               (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157),\n               (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118),\n               (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182),\n               (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255),\n               (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255),\n               (134, 134, 103), (145, 148, 174), (255, 208, 186),\n               (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255),\n               (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),\n               (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),\n               (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),\n               (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),\n               (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88),\n               (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118),\n               (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15),\n               (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0),\n               (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122),\n               (191, 162, 208)]\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from COCO style annotation file.\n\n        Args:\n            ann_file (str): Path of annotation file.\n\n        Returns:\n            list[dict]: Annotation info from COCO api.\n        \"\"\"\n\n        self.coco = COCO(ann_file)\n        # The order of returned `cat_ids` will 
not\n        # change with the order of the CLASSES\n        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        total_ann_ids = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            info['filename'] = info['file_name']\n            data_infos.append(info)\n            ann_ids = self.coco.get_ann_ids(img_ids=[i])\n            total_ann_ids.extend(ann_ids)\n        assert len(set(total_ann_ids)) == len(\n            total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n        return data_infos\n\n    def get_ann_info(self, idx):\n        \"\"\"Get COCO annotation by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        img_id = self.data_infos[idx]['id']\n        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n        ann_info = self.coco.load_anns(ann_ids)\n        return self._parse_ann_info(self.data_infos[idx], ann_info)\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get COCO category ids by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n\n        img_id = self.data_infos[idx]['id']\n        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n        ann_info = self.coco.load_anns(ann_ids)\n        return [ann['category_id'] for ann in ann_info]\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small or without ground truths.\"\"\"\n        valid_inds = []\n        # obtain images that contain annotation\n        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())\n        # obtain images that contain annotations of the required categories\n        ids_in_cat = set()\n        for i, class_id in enumerate(self.cat_ids):\n            ids_in_cat |= set(self.coco.cat_img_map[class_id])\n        # merge the image id sets of the two conditions and use the merged set\n        # to filter out images if self.filter_empty_gt=True\n        ids_in_cat &= ids_with_ann\n\n        valid_img_ids = []\n        for i, img_info in enumerate(self.data_infos):\n            img_id = self.img_ids[i]\n            if self.filter_empty_gt and img_id not in ids_in_cat:\n                continue\n            if min(img_info['width'], img_info['height']) >= min_size:\n                valid_inds.append(i)\n                valid_img_ids.append(img_id)\n        self.img_ids = valid_img_ids\n        return valid_inds\n\n    def _parse_ann_info(self, img_info, ann_info):\n        \"\"\"Parse bbox and mask annotation.\n\n        Args:\n            ann_info (list[dict]): Annotation info of an image.\n            with_mask (bool): Whether to parse mask annotations.\n\n        Returns:\n            dict: A dict containing the following keys: bboxes, bboxes_ignore,\\\n                labels, masks, seg_map. 
\"masks\" are raw annotations and not \\\n                decoded into binary masks.\n        \"\"\"\n        gt_bboxes = []\n        gt_labels = []\n        gt_bboxes_ignore = []\n        gt_masks_ann = []\n        for i, ann in enumerate(ann_info):\n            if ann.get('ignore', False):\n                continue\n            x1, y1, w, h = ann['bbox']\n            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))\n            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))\n            if inter_w * inter_h == 0:\n                continue\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            if ann['category_id'] not in self.cat_ids:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n            if ann.get('iscrowd', False):\n                gt_bboxes_ignore.append(bbox)\n            else:\n                gt_bboxes.append(bbox)\n                gt_labels.append(self.cat2label[ann['category_id']])\n                gt_masks_ann.append(ann.get('segmentation', None))\n\n        if gt_bboxes:\n            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n            gt_labels = np.array(gt_labels, dtype=np.int64)\n        else:\n            gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n            gt_labels = np.array([], dtype=np.int64)\n\n        if gt_bboxes_ignore:\n            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n        else:\n            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n        seg_map = img_info['filename'].rsplit('.', 1)[0] + self.seg_suffix\n\n        ann = dict(\n            bboxes=gt_bboxes,\n            labels=gt_labels,\n            bboxes_ignore=gt_bboxes_ignore,\n            masks=gt_masks_ann,\n            seg_map=seg_map)\n\n        return ann\n\n    def xyxy2xywh(self, bbox):\n        \"\"\"Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO\n        evaluation.\n\n        Args:\n            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in\n                ``xyxy`` order.\n\n        Returns:\n            list[float]: The converted bounding boxes, in ``xywh`` order.\n        \"\"\"\n\n        _bbox = bbox.tolist()\n        return [\n            _bbox[0],\n            _bbox[1],\n            _bbox[2] - _bbox[0],\n            _bbox[3] - _bbox[1],\n        ]\n\n    def _proposal2json(self, results):\n        \"\"\"Convert proposal results to COCO json style.\"\"\"\n        json_results = []\n        for idx in range(len(self)):\n            img_id = self.img_ids[idx]\n            bboxes = results[idx]\n            for i in range(bboxes.shape[0]):\n                data = dict()\n                data['image_id'] = img_id\n                data['bbox'] = self.xyxy2xywh(bboxes[i])\n                data['score'] = float(bboxes[i][4])\n                data['category_id'] = 1\n                json_results.append(data)\n        return json_results\n\n    def _det2json(self, results):\n        \"\"\"Convert detection results to COCO json style.\"\"\"\n        json_results = []\n        for idx in range(len(self)):\n            img_id = self.img_ids[idx]\n            result = results[idx]\n            for label in range(len(result)):\n                bboxes = result[label]\n                for i in range(bboxes.shape[0]):\n                    data = dict()\n                    data['image_id'] = img_id\n                    data['bbox'] = self.xyxy2xywh(bboxes[i])\n                    data['score'] = float(bboxes[i][4])\n        
            data['category_id'] = self.cat_ids[label]\n                    json_results.append(data)\n        return json_results\n\n    def _segm2json(self, results):\n        \"\"\"Convert instance segmentation results to COCO json style.\"\"\"\n        bbox_json_results = []\n        segm_json_results = []\n        for idx in range(len(self)):\n            img_id = self.img_ids[idx]\n            det, seg = results[idx]\n            for label in range(len(det)):\n                # bbox results\n                bboxes = det[label]\n                for i in range(bboxes.shape[0]):\n                    data = dict()\n                    data['image_id'] = img_id\n                    data['bbox'] = self.xyxy2xywh(bboxes[i])\n                    data['score'] = float(bboxes[i][4])\n                    data['category_id'] = self.cat_ids[label]\n                    bbox_json_results.append(data)\n\n                # segm results\n                # some detectors use different scores for bbox and mask\n                if isinstance(seg, tuple):\n                    segms = seg[0][label]\n                    mask_score = seg[1][label]\n                else:\n                    segms = seg[label]\n                    mask_score = [bbox[4] for bbox in bboxes]\n                for i in range(bboxes.shape[0]):\n                    data = dict()\n                    data['image_id'] = img_id\n                    data['bbox'] = self.xyxy2xywh(bboxes[i])\n                    data['score'] = float(mask_score[i])\n                    data['category_id'] = self.cat_ids[label]\n                    if isinstance(segms[i]['counts'], bytes):\n                        segms[i]['counts'] = segms[i]['counts'].decode()\n                    data['segmentation'] = segms[i]\n                    segm_json_results.append(data)\n        return bbox_json_results, segm_json_results\n\n    def results2json(self, results, outfile_prefix):\n        \"\"\"Dump the detection results to a COCO style json file.\n\n        There are 3 types of results: proposals, bbox predictions, mask\n        predictions, and they have different data types. This method will\n        automatically recognize the type, and dump them to json files.\n\n        Args:\n            results (list[list | tuple | ndarray]): Testing results of the\n                dataset.\n            outfile_prefix (str): The filename prefix of the json files. 
If the\n                prefix is \"somepath/xxx\", the json files will be named\n                \"somepath/xxx.bbox.json\", \"somepath/xxx.segm.json\",\n                \"somepath/xxx.proposal.json\".\n\n        Returns:\n            dict[str: str]: Possible keys are \"bbox\", \"segm\", \"proposal\", and \\\n                values are corresponding filenames.\n        \"\"\"\n        result_files = dict()\n        if isinstance(results[0], list):\n            json_results = self._det2json(results)\n            result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n            result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n            mmcv.dump(json_results, result_files['bbox'])\n        elif isinstance(results[0], tuple):\n            json_results = self._segm2json(results)\n            result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n            result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n            result_files['segm'] = f'{outfile_prefix}.segm.json'\n            mmcv.dump(json_results[0], result_files['bbox'])\n            mmcv.dump(json_results[1], result_files['segm'])\n        elif isinstance(results[0], np.ndarray):\n            json_results = self._proposal2json(results)\n            result_files['proposal'] = f'{outfile_prefix}.proposal.json'\n            mmcv.dump(json_results, result_files['proposal'])\n        else:\n            raise TypeError('invalid type of results')\n        return result_files\n\n    def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None):\n        gt_bboxes = []\n        for i in range(len(self.img_ids)):\n            ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i])\n            ann_info = self.coco.load_anns(ann_ids)\n            if len(ann_info) == 0:\n                gt_bboxes.append(np.zeros((0, 4)))\n                continue\n            bboxes = []\n            for ann in ann_info:\n                if ann.get('ignore', False) or ann['iscrowd']:\n                    continue\n                x1, y1, w, h = ann['bbox']\n                bboxes.append([x1, y1, x1 + w, y1 + h])\n            bboxes = np.array(bboxes, dtype=np.float32)\n            if bboxes.shape[0] == 0:\n                bboxes = np.zeros((0, 4))\n            gt_bboxes.append(bboxes)\n\n        recalls = eval_recalls(\n            gt_bboxes, results, proposal_nums, iou_thrs, logger=logger)\n        ar = recalls.mean(axis=1)\n        return ar\n\n    def format_results(self, results, jsonfile_prefix=None, **kwargs):\n        \"\"\"Format the results to json (standard format for COCO evaluation).\n\n        Args:\n            results (list[tuple | numpy.ndarray]): Testing results of the\n                dataset.\n            jsonfile_prefix (str | None): The prefix of json files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. 
Default: None.\n\n        Returns:\n            tuple: (result_files, tmp_dir), result_files is a dict containing \\\n                the json filepaths, tmp_dir is the temporal directory created \\\n                for saving json files when jsonfile_prefix is not specified.\n        \"\"\"\n        assert isinstance(results, list), 'results must be a list'\n        assert len(results) == len(self), (\n            'The length of results is not equal to the dataset len: {} != {}'.\n            format(len(results), len(self)))\n\n        if jsonfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            tmp_dir = None\n        result_files = self.results2json(results, jsonfile_prefix)\n        return result_files, tmp_dir\n\n    def evaluate_det_segm(self,\n                          results,\n                          result_files,\n                          coco_gt,\n                          metrics,\n                          logger=None,\n                          classwise=False,\n                          proposal_nums=(100, 300, 1000),\n                          iou_thrs=None,\n                          metric_items=None):\n        \"\"\"Instance segmentation and object detection evaluation in COCO\n        protocol.\n\n        Args:\n            results (list[list | tuple | dict]): Testing results of the\n                dataset.\n            result_files (dict[str, str]): a dict contains json file path.\n            coco_gt (COCO): COCO API object with ground truth annotation.\n            metric (str | list[str]): Metrics to be evaluated. Options are\n                'bbox', 'segm', 'proposal', 'proposal_fast'.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            classwise (bool): Whether to evaluating the AP for each class.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thrs (Sequence[float], optional): IoU threshold used for\n                evaluating recalls/mAPs. If set to a list, the average of all\n                IoUs will also be computed. If not specified, [0.50, 0.55,\n                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n                Default: None.\n            metric_items (list[str] | str, optional): Metric items that will\n                be returned. 
If not specified, ``['AR@100', 'AR@300',\n                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n                ``metric=='bbox' or metric=='segm'``.\n\n        Returns:\n            dict[str, float]: COCO style evaluation metric.\n        \"\"\"\n        if iou_thrs is None:\n            iou_thrs = np.linspace(\n                .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n        if metric_items is not None:\n            if not isinstance(metric_items, list):\n                metric_items = [metric_items]\n\n        eval_results = OrderedDict()\n        for metric in metrics:\n            msg = f'Evaluating {metric}...'\n            if logger is None:\n                msg = '\\n' + msg\n            print_log(msg, logger=logger)\n\n            if metric == 'proposal_fast':\n                if isinstance(results[0], tuple):\n                    raise KeyError('proposal_fast is not supported for '\n                                   'instance segmentation result.')\n                ar = self.fast_eval_recall(\n                    results, proposal_nums, iou_thrs, logger='silent')\n                log_msg = []\n                for i, num in enumerate(proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n                    log_msg.append(f'\\nAR@{num}\\t{ar[i]:.4f}')\n                log_msg = ''.join(log_msg)\n                print_log(log_msg, logger=logger)\n                continue\n\n            iou_type = 'bbox' if metric == 'proposal' else metric\n            if metric not in result_files:\n                raise KeyError(f'{metric} is not in results')\n            try:\n                predictions = mmcv.load(result_files[metric])\n                if iou_type == 'segm':\n                    # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331  # noqa\n                    # When evaluating mask AP, if the results contain bbox,\n                    # cocoapi will use the box area instead of the mask area\n                    # for calculating the instance area. Though the overall AP\n                    # is not affected, this leads to different\n                    # small/medium/large mask AP results.\n                    for x in predictions:\n                        x.pop('bbox')\n                    warnings.simplefilter('once')\n                    warnings.warn(\n                        'The key \"bbox\" is deleted for more accurate mask AP '\n                        'of small/medium/large instances since v2.12.0. 
This '\n                        'does not change the overall mAP calculation.',\n                        UserWarning)\n                coco_det = coco_gt.loadRes(predictions)\n            except IndexError:\n                print_log(\n                    'The testing results of the whole dataset is empty.',\n                    logger=logger,\n                    level=logging.ERROR)\n                break\n\n            cocoEval = COCOeval(coco_gt, coco_det, iou_type)\n            cocoEval.params.catIds = self.cat_ids\n            cocoEval.params.imgIds = self.img_ids\n            cocoEval.params.maxDets = list(proposal_nums)\n            cocoEval.params.iouThrs = iou_thrs\n            # mapping of cocoEval.stats\n            coco_metric_names = {\n                'mAP': 0,\n                'mAP_50': 1,\n                'mAP_75': 2,\n                'mAP_s': 3,\n                'mAP_m': 4,\n                'mAP_l': 5,\n                'AR@100': 6,\n                'AR@300': 7,\n                'AR@1000': 8,\n                'AR_s@1000': 9,\n                'AR_m@1000': 10,\n                'AR_l@1000': 11\n            }\n            if metric_items is not None:\n                for metric_item in metric_items:\n                    if metric_item not in coco_metric_names:\n                        raise KeyError(\n                            f'metric item {metric_item} is not supported')\n\n            if metric == 'proposal':\n                cocoEval.params.useCats = 0\n                cocoEval.evaluate()\n                cocoEval.accumulate()\n\n                # Save coco summarize print information to logger\n                redirect_string = io.StringIO()\n                with contextlib.redirect_stdout(redirect_string):\n                    cocoEval.summarize()\n                print_log('\\n' + redirect_string.getvalue(), logger=logger)\n\n                if metric_items is None:\n                    metric_items = [\n                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',\n                        'AR_m@1000', 'AR_l@1000'\n                    ]\n\n                for item in metric_items:\n                    val = float(\n                        f'{cocoEval.stats[coco_metric_names[item]]:.4f}')\n                    eval_results[item] = val\n            else:\n                cocoEval.evaluate()\n                cocoEval.accumulate()\n\n                # Save coco summarize print information to logger\n                redirect_string = io.StringIO()\n                with contextlib.redirect_stdout(redirect_string):\n                    cocoEval.summarize()\n                print_log('\\n' + redirect_string.getvalue(), logger=logger)\n\n                if classwise:  # Compute per-category AP\n                    # Compute per-category AP\n                    # from https://github.com/facebookresearch/detectron2/\n                    precisions = cocoEval.eval['precision']\n                    # precision: (iou, recall, cls, area range, max dets)\n                    assert len(self.cat_ids) == precisions.shape[2]\n\n                    results_per_category = []\n                    for idx, catId in enumerate(self.cat_ids):\n                        # area range index 0: all area ranges\n                        # max dets index -1: typically 100 per image\n                        nm = self.coco.loadCats(catId)[0]\n                        precision = precisions[:, :, idx, 0, -1]\n                        precision = precision[precision > -1]\n                        if 
precision.size:\n                            ap = np.mean(precision)\n                        else:\n                            ap = float('nan')\n                        results_per_category.append(\n                            (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n\n                    num_columns = min(6, len(results_per_category) * 2)\n                    results_flatten = list(\n                        itertools.chain(*results_per_category))\n                    headers = ['category', 'AP'] * (num_columns // 2)\n                    results_2d = itertools.zip_longest(*[\n                        results_flatten[i::num_columns]\n                        for i in range(num_columns)\n                    ])\n                    table_data = [headers]\n                    table_data += [result for result in results_2d]\n                    table = AsciiTable(table_data)\n                    print_log('\\n' + table.table, logger=logger)\n\n                if metric_items is None:\n                    metric_items = [\n                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'\n                    ]\n\n                for metric_item in metric_items:\n                    key = f'{metric}_{metric_item}'\n                    val = float(\n                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.4f}'\n                    )\n                    eval_results[key] = val\n                ap = cocoEval.stats[:6]\n                eval_results[f'{metric}_mAP_copypaste'] = (\n                    f'{ap[0]:.4f} {ap[1]:.4f} {ap[2]:.4f} {ap[3]:.4f} '\n                    f'{ap[4]:.4f} {ap[5]:.4f}')\n\n        return eval_results\n\n    def evaluate(self,\n                 results,\n                 metric='bbox',\n                 logger=None,\n                 jsonfile_prefix=None,\n                 classwise=False,\n                 proposal_nums=(100, 300, 1000),\n                 iou_thrs=None,\n                 metric_items=None):\n        \"\"\"Evaluation in COCO protocol.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. Options are\n                'bbox', 'segm', 'proposal', 'proposal_fast'.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            jsonfile_prefix (str | None): The prefix of json files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. Default: None.\n            classwise (bool): Whether to evaluating the AP for each class.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thrs (Sequence[float], optional): IoU threshold used for\n                evaluating recalls/mAPs. If set to a list, the average of all\n                IoUs will also be computed. If not specified, [0.50, 0.55,\n                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.\n                Default: None.\n            metric_items (list[str] | str, optional): Metric items that will\n                be returned. 
If not specified, ``['AR@100', 'AR@300',\n                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be\n                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',\n                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when\n                ``metric=='bbox' or metric=='segm'``.\n\n        Returns:\n            dict[str, float]: COCO style evaluation metric.\n        \"\"\"\n\n        metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n        for metric in metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(f'metric {metric} is not supported')\n\n        coco_gt = self.coco\n        self.cat_ids = coco_gt.get_cat_ids(cat_names=self.CLASSES)\n\n        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n        eval_results = self.evaluate_det_segm(results, result_files, coco_gt,\n                                              metrics, logger, classwise,\n                                              proposal_nums, iou_thrs,\n                                              metric_items)\n\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n"
  },
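For reference, a small self-contained sketch of the detection-result layout that `CocoDataset._det2json` and `evaluate` above expect, and of the `xyxy2xywh` conversion applied before results are dumped to json. The numbers are made up for illustration.

# Hypothetical example, not part of the repository's code.
import numpy as np

num_classes = 80  # len(CocoDataset.CLASSES)
# One entry per image; each entry holds one (N, 5) array per class with
# rows [x1, y1, x2, y2, score] in xyxy order (see _det2json above).
fake_results = [
    [np.zeros((0, 5), dtype=np.float32) for _ in range(num_classes)]
    for _ in range(2)  # two images
]
fake_results[0][0] = np.array([[10., 20., 110., 220., 0.9]], dtype=np.float32)

# xyxy2xywh keeps the top-left corner and converts to COCO's [x, y, w, h]:
x1, y1, x2, y2, _score = fake_results[0][0][0]
coco_box = [float(x1), float(y1), float(x2 - x1), float(y2 - y1)]
assert coco_box == [10.0, 20.0, 100.0, 200.0]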
  {
    "path": "mmdet/datasets/coco_occluded.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nimport mmcv\nimport numpy as np\nfrom mmcv.fileio import load\nfrom mmcv.utils import print_log\nfrom pycocotools import mask as coco_mask\nfrom terminaltables import AsciiTable\n\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass OccludedSeparatedCocoDataset(CocoDataset):\n    \"\"\"COCO dataset with evaluation on separated and occluded masks which\n    presented in paper `A Tri-Layer Plugin to Improve Occluded Detection.\n\n    <https://arxiv.org/abs/2210.10046>`_.\n\n    Separated COCO and Occluded COCO are automatically generated subsets of\n    COCO val dataset, collecting separated objects and partially occluded\n    objects for a large variety of categories. In this way, we define\n    occlusion into two major categories: separated and partially occluded.\n\n    - Separation: target object segmentation mask is separated into distinct\n      regions by the occluder.\n    - Partial Occlusion: target object is partially occluded but the\n      segmentation mask is connected.\n\n    These two new scalable real-image datasets are to benchmark a model's\n    capability to detect occluded objects of 80 common categories.\n\n    Please cite the paper if you use this dataset:\n\n    @article{zhan2022triocc,\n        title={A Tri-Layer Plugin to Improve Occluded Detection},\n        author={Zhan, Guanqi and Xie, Weidi and Zisserman, Andrew},\n        journal={British Machine Vision Conference},\n        year={2022}\n    }\n\n    Args:\n        occluded_ann (str): Path to the occluded coco annotation file.\n        separated_ann (str): Path to the separated coco annotation file.\n    \"\"\"  # noqa\n\n    def __init__(\n            self,\n            *args,\n            occluded_ann='https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/occluded_coco.pkl',  # noqa\n            separated_ann='https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/separated_coco.pkl',  # noqa\n            **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # load from local file\n        if osp.isfile(occluded_ann) and not osp.isabs(occluded_ann):\n            occluded_ann = osp.join(self.data_root, occluded_ann)\n        if osp.isfile(separated_ann) and not osp.isabs(separated_ann):\n            separated_ann = osp.join(self.data_root, separated_ann)\n\n        self.occluded_ann = load(occluded_ann)\n        self.separated_ann = load(separated_ann)\n\n    def evaluate(self,\n                 results,\n                 metric=[],\n                 score_thr=0.3,\n                 iou_thr=0.75,\n                 **kwargs):\n        \"\"\"Occluded and separated mask evaluation in COCO protocol.\n\n        Args:\n            results (list[tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. Options are\n                'bbox', 'segm', 'proposal', 'proposal_fast'. 
Defaults to [].\n            score_thr (float): Score threshold of the detection masks.\n                Defaults to 0.3.\n            iou_thr (float): IoU threshold for the recall calculation.\n                Defaults to 0.75.\n        Returns:\n            dict[str, float]: The recall of occluded and separated masks and\n            COCO style evaluation metric.\n        \"\"\"\n        coco_metric_res = super().evaluate(results, metric=metric, **kwargs)\n        eval_res = self.evaluate_occluded_separated(results, score_thr,\n                                                    iou_thr)\n        coco_metric_res.update(eval_res)\n        return coco_metric_res\n\n    def evaluate_occluded_separated(self,\n                                    results,\n                                    score_thr=0.3,\n                                    iou_thr=0.75):\n        \"\"\"Compute the recall of occluded and separated masks.\n\n        Args:\n            results (list[tuple]): Testing results of the dataset.\n            score_thr (float): Score threshold of the detection masks.\n                Defaults to 0.3.\n            iou_thr (float): IoU threshold for the recall calculation.\n                Defaults to 0.75.\n        Returns:\n            dict[str, float]: The recall of occluded and separated masks.\n        \"\"\"\n        dict_det = {}\n        print_log('processing detection results...')\n        prog_bar = mmcv.ProgressBar(len(results))\n        for i in range(len(results)):\n            cur_img_name = self.data_infos[i]['filename']\n            if cur_img_name not in dict_det.keys():\n                dict_det[cur_img_name] = []\n            for cat_id in range(len(results[i][1])):\n                assert len(results[i][1][cat_id]) == len(results[i][0][cat_id])\n                for instance_id in range(len(results[i][1][cat_id])):\n                    cur_binary_mask = coco_mask.decode(\n                        results[i][1][cat_id][instance_id])\n                    cur_det_bbox = results[i][0][cat_id][instance_id][:4]\n                    dict_det[cur_img_name].append([\n                        results[i][0][cat_id][instance_id][4],\n                        self.CLASSES[cat_id], cur_binary_mask, cur_det_bbox\n                    ])\n            dict_det[cur_img_name].sort(\n                key=lambda x: (-x[0], x[3][0], x[3][1])\n            )  # rank by confidence from high to low, avoid same confidence\n            prog_bar.update()\n        print_log('\\ncomputing occluded mask recall...')\n        occluded_correct_num, occluded_recall = self.compute_recall(\n            dict_det,\n            gt_ann=self.occluded_ann,\n            score_thr=score_thr,\n            iou_thr=iou_thr,\n            is_occ=True)\n        print_log(f'\\nCOCO occluded mask recall: {occluded_recall:.2f}%')\n        print_log(f'COCO occluded mask success num: {occluded_correct_num}')\n        print_log('computing separated mask recall...')\n        separated_correct_num, separated_recall = self.compute_recall(\n            dict_det,\n            gt_ann=self.separated_ann,\n            score_thr=score_thr,\n            iou_thr=iou_thr,\n            is_occ=False)\n        print_log(f'\\nCOCO separated mask recall: {separated_recall:.2f}%')\n        print_log(f'COCO separated mask success num: {separated_correct_num}')\n        table_data = [\n            ['mask type', 'recall', 'num correct'],\n            ['occluded', f'{occluded_recall:.2f}%', occluded_correct_num],\n            ['separated', 
f'{separated_recall:.2f}%', separated_correct_num]\n        ]\n        table = AsciiTable(table_data)\n        print_log('\\n' + table.table)\n        return dict(\n            occluded_recall=occluded_recall, separated_recall=separated_recall)\n\n    def compute_recall(self,\n                       result_dict,\n                       gt_ann,\n                       score_thr=0.3,\n                       iou_thr=0.75,\n                       is_occ=True):\n        \"\"\"Compute the recall of occluded or separated masks.\n\n        Args:\n            results (list[tuple]): Testing results of the dataset.\n            gt_ann (list): Occluded or separated coco annotations.\n            score_thr (float): Score threshold of the detection masks.\n                Defaults to 0.3.\n            iou_thr (float): IoU threshold for the recall calculation.\n                Defaults to 0.75.\n            is_occ (bool): Whether the annotation is occluded mask.\n                Defaults to True.\n        Returns:\n            tuple: number of correct masks and the recall.\n        \"\"\"\n        correct = 0\n        prog_bar = mmcv.ProgressBar(len(gt_ann))\n        for iter_i in range(len(gt_ann)):\n            cur_item = gt_ann[iter_i]\n            cur_img_name = cur_item[0]\n            cur_gt_bbox = cur_item[3]\n            if is_occ:\n                cur_gt_bbox = [\n                    cur_gt_bbox[0], cur_gt_bbox[1],\n                    cur_gt_bbox[0] + cur_gt_bbox[2],\n                    cur_gt_bbox[1] + cur_gt_bbox[3]\n                ]\n            cur_gt_class = cur_item[1]\n            cur_gt_mask = coco_mask.decode(cur_item[4])\n\n            assert cur_img_name in result_dict.keys()\n            cur_detections = result_dict[cur_img_name]\n\n            correct_flag = False\n            for i in range(len(cur_detections)):\n                cur_det_confidence = cur_detections[i][0]\n                if cur_det_confidence < score_thr:\n                    break\n                cur_det_class = cur_detections[i][1]\n                if cur_det_class != cur_gt_class:\n                    continue\n                cur_det_mask = cur_detections[i][2]\n                cur_iou = self.mask_iou(cur_det_mask, cur_gt_mask)\n                if cur_iou >= iou_thr:\n                    correct_flag = True\n                    break\n            if correct_flag:\n                correct += 1\n            prog_bar.update()\n        recall = correct / len(gt_ann) * 100\n        return correct, recall\n\n    def mask_iou(self, mask1, mask2):\n        \"\"\"Compute IoU between two masks.\"\"\"\n        mask1_area = np.count_nonzero(mask1 == 1)\n        mask2_area = np.count_nonzero(mask2 == 1)\n        intersection = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1))\n        iou = intersection / (mask1_area + mask2_area - intersection)\n        return iou\n"
  },
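The occluded/separated recall computed by `OccludedSeparatedCocoDataset` above counts a ground-truth mask as recalled when some detection of the same class, with score above `score_thr`, overlaps it with IoU of at least `iou_thr` (0.75 by default). Below is a tiny self-contained check of the `mask_iou` definition it relies on; the masks are made up for illustration.

# Hypothetical example, not part of the repository's code.
import numpy as np

mask_a = np.zeros((4, 4), dtype=np.uint8)
mask_a[:2, :2] = 1  # area 4
mask_b = np.zeros((4, 4), dtype=np.uint8)
mask_b[:2, 1:3] = 1  # area 4, overlapping mask_a in 2 pixels

inter = np.count_nonzero(np.logical_and(mask_a == 1, mask_b == 1))
union = np.count_nonzero(mask_a == 1) + np.count_nonzero(mask_b == 1) - inter
iou = inter / union  # 2 / 6 == 1/3, matching mask_iou(mask_a, mask_b)
assert abs(iou - 1 / 3) < 1e-6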
  {
    "path": "mmdet/datasets/coco_panoptic.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport os\nfrom collections import defaultdict\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom mmdet.core import INSTANCE_OFFSET\nfrom .api_wrappers import COCO, pq_compute_multi_core\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\ntry:\n    import panopticapi\n    from panopticapi.evaluation import VOID\n    from panopticapi.utils import id2rgb\nexcept ImportError:\n    panopticapi = None\n    id2rgb = None\n    VOID = None\n\n__all__ = ['CocoPanopticDataset']\n\n\nclass COCOPanoptic(COCO):\n    \"\"\"This wrapper is for loading the panoptic style annotation file.\n\n    The format is shown in the CocoPanopticDataset class.\n\n    Args:\n        annotation_file (str): Path of annotation file.\n    \"\"\"\n\n    def __init__(self, annotation_file=None):\n        if panopticapi is None:\n            raise RuntimeError(\n                'panopticapi is not installed, please install it by: '\n                'pip install git+https://github.com/cocodataset/'\n                'panopticapi.git.')\n\n        super(COCOPanoptic, self).__init__(annotation_file)\n\n    def createIndex(self):\n        # create index\n        print('creating index...')\n        # anns stores 'segment_id -> annotation'\n        anns, cats, imgs = {}, {}, {}\n        img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list)\n        if 'annotations' in self.dataset:\n            for ann, img_info in zip(self.dataset['annotations'],\n                                     self.dataset['images']):\n                img_info['segm_file'] = ann['file_name']\n                for seg_ann in ann['segments_info']:\n                    # to match with instance.json\n                    seg_ann['image_id'] = ann['image_id']\n                    seg_ann['height'] = img_info['height']\n                    seg_ann['width'] = img_info['width']\n                    img_to_anns[ann['image_id']].append(seg_ann)\n                    # segment_id is not unique in coco dataset orz...\n                    if seg_ann['id'] in anns.keys():\n                        anns[seg_ann['id']].append(seg_ann)\n                    else:\n                        anns[seg_ann['id']] = [seg_ann]\n\n        if 'images' in self.dataset:\n            for img in self.dataset['images']:\n                imgs[img['id']] = img\n\n        if 'categories' in self.dataset:\n            for cat in self.dataset['categories']:\n                cats[cat['id']] = cat\n\n        if 'annotations' in self.dataset and 'categories' in self.dataset:\n            for ann in self.dataset['annotations']:\n                for seg_ann in ann['segments_info']:\n                    cat_to_imgs[seg_ann['category_id']].append(ann['image_id'])\n\n        print('index created!')\n\n        self.anns = anns\n        self.imgToAnns = img_to_anns\n        self.catToImgs = cat_to_imgs\n        self.imgs = imgs\n        self.cats = cats\n\n    def load_anns(self, ids=[]):\n        \"\"\"Load anns with the specified ids.\n\n        self.anns is a list of annotation lists instead of a\n        list of annotations.\n\n        Args:\n            ids (int array): integer ids specifying anns\n\n        Returns:\n            anns (object array): loaded ann objects\n        \"\"\"\n        anns = []\n\n        if hasattr(ids, '__iter__') and hasattr(ids, '__len__'):\n            # self.anns is a list of annotation lists instead of\n 
           # a list of annotations\n            for id in ids:\n                anns += self.anns[id]\n            return anns\n        elif type(ids) == int:\n            return self.anns[ids]\n\n\n@DATASETS.register_module()\nclass CocoPanopticDataset(CocoDataset):\n    \"\"\"Coco dataset for Panoptic segmentation.\n\n    The annotation format is shown as follows. The `ann` field is optional\n    for testing.\n\n    .. code-block:: none\n\n        [\n            {\n                'filename': f'{image_id:012}.png',\n                'image_id':9\n                'segments_info': {\n                    [\n                        {\n                            'id': 8345037, (segment_id in panoptic png,\n                                            convert from rgb)\n                            'category_id': 51,\n                            'iscrowd': 0,\n                            'bbox': (x1, y1, w, h),\n                            'area': 24315,\n                            'segmentation': list,(coded mask)\n                        },\n                        ...\n                    }\n                }\n            },\n            ...\n        ]\n\n    Args:\n        ann_file (str): Panoptic segmentation annotation file path.\n        pipeline (list[dict]): Processing pipeline.\n        ins_ann_file (str): Instance segmentation annotation file path.\n            Defaults to None.\n        classes (str | Sequence[str], optional): Specify classes to load.\n            If is None, ``cls.CLASSES`` will be used. Defaults to None.\n        data_root (str, optional): Data root for ``ann_file``,\n            ``ins_ann_file`` ``img_prefix``, ``seg_prefix``, ``proposal_file``\n            if specified. Defaults to None.\n        img_prefix (str, optional): Prefix of path to images. Defaults to ''.\n        seg_prefix (str, optional): Prefix of path to segmentation files.\n            Defaults to None.\n        proposal_file (str, optional): Path to proposal file. Defaults to None.\n        test_mode (bool, optional): If set True, annotation will not be loaded.\n            Defaults to False.\n        filter_empty_gt (bool, optional): If set true, images without bounding\n            boxes of the dataset's classes will be filtered out. This option\n            only works when `test_mode=False`, i.e., we never filter images\n            during tests. 
Defaults to True.\n        file_client_args (:obj:`mmcv.ConfigDict` | dict): file client args.\n            Defaults to dict(backend='disk').\n    \"\"\"\n    CLASSES = [\n        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n        ' truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',\n        'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',\n        'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',\n        'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',\n        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',\n        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n        'wall-wood', 'water-other', 'window-blind', 'window-other',\n        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',\n        'cabinet-merged', 'table-merged', 'floor-other-merged',\n        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',\n        'paper-merged', 'food-other-merged', 'building-other-merged',\n        'rock-merged', 'wall-other-merged', 'rug-merged'\n    ]\n    THING_CLASSES = [\n        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',\n        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',\n        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',\n        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',\n        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',\n        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',\n        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',\n        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',\n        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',\n        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',\n        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',\n        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',\n        'scissors', 'teddy bear', 'hair drier', 'toothbrush'\n    ]\n    STUFF_CLASSES = [\n        'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',\n        'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',\n        'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',\n        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',\n        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',\n        'wall-wood', 'water-other', 'window-blind', 'window-other',\n        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',\n        'cabinet-merged', 
'table-merged', 'floor-other-merged',\n        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',\n        'paper-merged', 'food-other-merged', 'building-other-merged',\n        'rock-merged', 'wall-other-merged', 'rug-merged'\n    ]\n\n    PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230),\n               (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70),\n               (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0),\n               (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255),\n               (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157),\n               (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118),\n               (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182),\n               (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255),\n               (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255),\n               (134, 134, 103), (145, 148, 174), (255, 208, 186),\n               (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255),\n               (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105),\n               (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149),\n               (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205),\n               (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0),\n               (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88),\n               (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118),\n               (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15),\n               (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0),\n               (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122),\n               (191, 162, 208), (255, 255, 128), (147, 211, 203),\n               (150, 100, 100), (168, 171, 172), (146, 112, 198),\n               (210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0),\n               (217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255),\n               (154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195),\n               (106, 154, 176),\n               (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55),\n               (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255),\n               (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74),\n               (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149),\n               (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153),\n               (146, 139, 141),\n               (70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108),\n               (96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228),\n               (206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143),\n               (102, 102, 156), (250, 141, 255)]\n\n    def __init__(self,\n                 ann_file,\n                 pipeline,\n                 ins_ann_file=None,\n                 classes=None,\n                 data_root=None,\n                 img_prefix='',\n                 seg_prefix=None,\n                 proposal_file=None,\n                 test_mode=False,\n                 filter_empty_gt=True,\n                 file_client_args=dict(backend='disk')):\n        super().__init__(\n            ann_file,\n            pipeline,\n            classes=classes,\n            data_root=data_root,\n            img_prefix=img_prefix,\n            seg_prefix=seg_prefix,\n            
proposal_file=proposal_file,\n            test_mode=test_mode,\n            filter_empty_gt=filter_empty_gt,\n            file_client_args=file_client_args)\n        self.ins_ann_file = ins_ann_file\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from COCO Panoptic style annotation file.\n\n        Args:\n            ann_file (str): Path of annotation file.\n\n        Returns:\n            list[dict]: Annotation info from COCO api.\n        \"\"\"\n        self.coco = COCOPanoptic(ann_file)\n        self.cat_ids = self.coco.get_cat_ids()\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.categories = self.coco.cats\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            info['filename'] = info['file_name']\n            info['segm_file'] = info['filename'].replace('jpg', 'png')\n            data_infos.append(info)\n        return data_infos\n\n    def get_ann_info(self, idx):\n        \"\"\"Get COCO annotation by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n        img_id = self.data_infos[idx]['id']\n        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])\n        ann_info = self.coco.load_anns(ann_ids)\n        # filter out unmatched images\n        ann_info = [i for i in ann_info if i['image_id'] == img_id]\n        return self._parse_ann_info(self.data_infos[idx], ann_info)\n\n    def _parse_ann_info(self, img_info, ann_info):\n        \"\"\"Parse annotations and load panoptic ground truths.\n\n        Args:\n            img_info (int): Image info of an image.\n            ann_info (list[dict]): Annotation info of an image.\n\n        Returns:\n            dict: A dict containing the following keys: bboxes, bboxes_ignore,\n                labels, masks, seg_map.\n        \"\"\"\n        gt_bboxes = []\n        gt_labels = []\n        gt_bboxes_ignore = []\n        gt_mask_infos = []\n\n        for i, ann in enumerate(ann_info):\n            x1, y1, w, h = ann['bbox']\n            if ann['area'] <= 0 or w < 1 or h < 1:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n\n            category_id = ann['category_id']\n            contiguous_cat_id = self.cat2label[category_id]\n\n            is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']\n            if is_thing:\n                is_crowd = ann.get('iscrowd', False)\n                if not is_crowd:\n                    gt_bboxes.append(bbox)\n                    gt_labels.append(contiguous_cat_id)\n                else:\n                    gt_bboxes_ignore.append(bbox)\n                    is_thing = False\n\n            mask_info = {\n                'id': ann['id'],\n                'category': contiguous_cat_id,\n                'is_thing': is_thing\n            }\n            gt_mask_infos.append(mask_info)\n\n        if gt_bboxes:\n            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n            gt_labels = np.array(gt_labels, dtype=np.int64)\n        else:\n            gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n            gt_labels = np.array([], dtype=np.int64)\n\n        if gt_bboxes_ignore:\n            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n        else:\n            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n        ann = dict(\n            
bboxes=gt_bboxes,\n            labels=gt_labels,\n            bboxes_ignore=gt_bboxes_ignore,\n            masks=gt_mask_infos,\n            seg_map=img_info['segm_file'])\n\n        return ann\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small or without ground truths.\"\"\"\n        ids_with_ann = []\n        # check whether images have legal thing annotations.\n        for lists in self.coco.anns.values():\n            for item in lists:\n                category_id = item['category_id']\n                is_thing = self.coco.load_cats(ids=category_id)[0]['isthing']\n                if not is_thing:\n                    continue\n                ids_with_ann.append(item['image_id'])\n        ids_with_ann = set(ids_with_ann)\n\n        valid_inds = []\n        valid_img_ids = []\n        for i, img_info in enumerate(self.data_infos):\n            img_id = self.img_ids[i]\n            if self.filter_empty_gt and img_id not in ids_with_ann:\n                continue\n            if min(img_info['width'], img_info['height']) >= min_size:\n                valid_inds.append(i)\n                valid_img_ids.append(img_id)\n        self.img_ids = valid_img_ids\n        return valid_inds\n\n    def _pan2json(self, results, outfile_prefix):\n        \"\"\"Convert panoptic results to COCO panoptic json style.\"\"\"\n        label2cat = dict((v, k) for (k, v) in self.cat2label.items())\n        pred_annotations = []\n        outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')\n\n        for idx in range(len(self)):\n            img_id = self.img_ids[idx]\n            segm_file = self.data_infos[idx]['segm_file']\n            pan = results[idx]\n\n            pan_labels = np.unique(pan)\n            segm_info = []\n            for pan_label in pan_labels:\n                sem_label = pan_label % INSTANCE_OFFSET\n                # We reserve the length of self.CLASSES for VOID label\n                if sem_label == len(self.CLASSES):\n                    continue\n                # convert sem_label to json label\n                cat_id = label2cat[sem_label]\n                is_thing = self.categories[cat_id]['isthing']\n                mask = pan == pan_label\n                area = mask.sum()\n                segm_info.append({\n                    'id': int(pan_label),\n                    'category_id': cat_id,\n                    'isthing': is_thing,\n                    'area': int(area)\n                })\n            # evaluation script uses 0 for VOID label.\n            pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID\n            pan = id2rgb(pan).astype(np.uint8)\n            mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file))\n            record = {\n                'image_id': img_id,\n                'segments_info': segm_info,\n                'file_name': segm_file\n            }\n            pred_annotations.append(record)\n        pan_json_results = dict(annotations=pred_annotations)\n        return pan_json_results\n\n    def results2json(self, results, outfile_prefix):\n        \"\"\"Dump the results to a COCO style json file.\n\n        There are 4 types of results: proposals, bbox predictions, mask\n        predictions, panoptic segmentation predictions, and they have\n        different data types. This method will automatically recognize\n        the type, and dump them to json files.\n\n        .. 
code-block:: none\n\n            [\n                {\n                    'pan_results': np.array, # shape (h, w)\n                    # ins_results which includes bboxes and RLE encoded masks\n                    # is optional.\n                    'ins_results': (list[np.array], list[list[str]])\n                },\n                ...\n            ]\n\n        Args:\n            results (list[dict]): Testing results of the dataset.\n            outfile_prefix (str): The filename prefix of the json files. If the\n                prefix is \"somepath/xxx\", the json files will be named\n                \"somepath/xxx.panoptic.json\", \"somepath/xxx.bbox.json\",\n                \"somepath/xxx.segm.json\"\n\n        Returns:\n            dict[str: str]: Possible keys are \"panoptic\", \"bbox\", \"segm\", \\\n                \"proposal\", and values are corresponding filenames.\n        \"\"\"\n        result_files = dict()\n        # panoptic segmentation results\n        if 'pan_results' in results[0]:\n            pan_results = [result['pan_results'] for result in results]\n            pan_json_results = self._pan2json(pan_results, outfile_prefix)\n            result_files['panoptic'] = f'{outfile_prefix}.panoptic.json'\n            mmcv.dump(pan_json_results, result_files['panoptic'])\n\n        # instance segmentation results\n        if 'ins_results' in results[0]:\n            ins_results = [result['ins_results'] for result in results]\n            bbox_json_results, segm_json_results = self._segm2json(ins_results)\n            result_files['bbox'] = f'{outfile_prefix}.bbox.json'\n            result_files['proposal'] = f'{outfile_prefix}.bbox.json'\n            result_files['segm'] = f'{outfile_prefix}.segm.json'\n            mmcv.dump(bbox_json_results, result_files['bbox'])\n            mmcv.dump(segm_json_results, result_files['segm'])\n\n        return result_files\n\n    def evaluate_pan_json(self,\n                          result_files,\n                          outfile_prefix,\n                          logger=None,\n                          classwise=False,\n                          nproc=32):\n        \"\"\"Evaluate PQ according to the panoptic results json file.\"\"\"\n        imgs = self.coco.imgs\n        gt_json = self.coco.img_ann_map  # image to annotations\n        gt_json = [{\n            'image_id': k,\n            'segments_info': v,\n            'file_name': imgs[k]['segm_file']\n        } for k, v in gt_json.items()]\n        pred_json = mmcv.load(result_files['panoptic'])\n        pred_json = dict(\n            (el['image_id'], el) for el in pred_json['annotations'])\n\n        # match the gt_anns and pred_anns in the same image\n        matched_annotations_list = []\n        for gt_ann in gt_json:\n            img_id = gt_ann['image_id']\n            if img_id not in pred_json.keys():\n                raise Exception('no prediction for the image'\n                                ' with id: {}'.format(img_id))\n            matched_annotations_list.append((gt_ann, pred_json[img_id]))\n\n        gt_folder = self.seg_prefix\n        pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic')\n\n        pq_stat = pq_compute_multi_core(\n            matched_annotations_list,\n            gt_folder,\n            pred_folder,\n            self.categories,\n            self.file_client,\n            nproc=nproc)\n\n        metrics = [('All', None), ('Things', True), ('Stuff', False)]\n        pq_results = {}\n\n        for name, isthing in metrics:\n   
         pq_results[name], classwise_results = pq_stat.pq_average(\n                self.categories, isthing=isthing)\n            if name == 'All':\n                pq_results['classwise'] = classwise_results\n\n        classwise_results = None\n        if classwise:\n            classwise_results = {\n                k: v\n                for k, v in zip(self.CLASSES, pq_results['classwise'].values())\n            }\n        print_panoptic_table(pq_results, classwise_results, logger=logger)\n        results = parse_pq_results(pq_results)\n        results['PQ_copypaste'] = (\n            f'{results[\"PQ\"]:.3f} {results[\"SQ\"]:.3f} '\n            f'{results[\"RQ\"]:.3f} '\n            f'{results[\"PQ_th\"]:.3f} {results[\"SQ_th\"]:.3f} '\n            f'{results[\"RQ_th\"]:.3f} '\n            f'{results[\"PQ_st\"]:.3f} {results[\"SQ_st\"]:.3f} '\n            f'{results[\"RQ_st\"]:.3f}')\n\n        return results\n\n    def evaluate(self,\n                 results,\n                 metric='PQ',\n                 logger=None,\n                 jsonfile_prefix=None,\n                 classwise=False,\n                 nproc=32,\n                 **kwargs):\n        \"\"\"Evaluation in COCO Panoptic protocol.\n\n        Args:\n            results (list[dict]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. 'PQ', 'bbox',\n                'segm', 'proposal' are supported. 'pq' will be regarded as 'PQ.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. Default: None.\n            jsonfile_prefix (str | None): The prefix of json files. It includes\n                the file path and the prefix of filename, e.g., \"a/b/prefix\".\n                If not specified, a temp file will be created. Default: None.\n            classwise (bool): Whether to print classwise evaluation results.\n                Default: False.\n            nproc (int): Number of processes for panoptic quality computing.\n                Defaults to 32. 
When `nproc` exceeds the number of cpu cores,\n                the number of cpu cores is used.\n\n        Returns:\n            dict[str, float]: COCO Panoptic style evaluation metric.\n        \"\"\"\n        metrics = metric if isinstance(metric, list) else [metric]\n        # Compatible with lowercase 'pq'\n        metrics = ['PQ' if metric == 'pq' else metric for metric in metrics]\n        allowed_metrics = ['PQ', 'bbox', 'segm', 'proposal']\n        for metric in metrics:\n            if metric not in allowed_metrics:\n                raise KeyError(f'metric {metric} is not supported')\n\n        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)\n        eval_results = {}\n\n        outfile_prefix = os.path.join(tmp_dir.name, 'results') \\\n            if tmp_dir is not None else jsonfile_prefix\n        if 'PQ' in metrics:\n            eval_pan_results = self.evaluate_pan_json(\n                result_files, outfile_prefix, logger, classwise, nproc=nproc)\n\n            eval_results.update(eval_pan_results)\n            metrics.remove('PQ')\n\n        if (('bbox' in metrics) or ('segm' in metrics)\n                or ('proposal' in metrics)):\n\n            assert 'ins_results' in results[0], 'instance segmentation ' \\\n                'results are absent from results'\n\n            assert self.ins_ann_file is not None, 'Annotation '\\\n                'file for instance segmentation or object detection ' \\\n                'should not be None'\n\n            coco_gt = COCO(self.ins_ann_file)\n            panoptic_cat_ids = self.cat_ids\n            self.cat_ids = coco_gt.get_cat_ids(cat_names=self.THING_CLASSES)\n\n            eval_ins_results = self.evaluate_det_segm(results, result_files,\n                                                      coco_gt, metrics, logger,\n                                                      classwise, **kwargs)\n            self.cat_ids = panoptic_cat_ids\n            eval_results.update(eval_ins_results)\n\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n\n\ndef parse_pq_results(pq_results):\n    \"\"\"Parse the Panoptic Quality results.\"\"\"\n    result = dict()\n    result['PQ'] = 100 * pq_results['All']['pq']\n    result['SQ'] = 100 * pq_results['All']['sq']\n    result['RQ'] = 100 * pq_results['All']['rq']\n    result['PQ_th'] = 100 * pq_results['Things']['pq']\n    result['SQ_th'] = 100 * pq_results['Things']['sq']\n    result['RQ_th'] = 100 * pq_results['Things']['rq']\n    result['PQ_st'] = 100 * pq_results['Stuff']['pq']\n    result['SQ_st'] = 100 * pq_results['Stuff']['sq']\n    result['RQ_st'] = 100 * pq_results['Stuff']['rq']\n    return result\n\n\ndef print_panoptic_table(pq_results, classwise_results=None, logger=None):\n    \"\"\"Print the panoptic evaluation results table.\n\n    Args:\n        pq_results(dict): The Panoptic Quality results.\n        classwise_results(dict | None): The classwise Panoptic Quality results.\n            The keys are class names and the values are metrics.\n        logger (logging.Logger | str | None): Logger used for printing\n            related information during evaluation. 
Default: None.\n    \"\"\"\n\n    headers = ['', 'PQ', 'SQ', 'RQ', 'categories']\n    data = [headers]\n    for name in ['All', 'Things', 'Stuff']:\n        numbers = [\n            f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']\n        ]\n        row = [name] + numbers + [pq_results[name]['n']]\n        data.append(row)\n    table = AsciiTable(data)\n    print_log('Panoptic Evaluation Results:\\n' + table.table, logger=logger)\n\n    if classwise_results is not None:\n        class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}'\n                                          for k in ['pq', 'sq', 'rq'])\n                         for name, metrics in classwise_results.items()]\n        num_columns = min(8, len(class_metrics) * 4)\n        results_flatten = list(itertools.chain(*class_metrics))\n        headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4)\n        results_2d = itertools.zip_longest(\n            *[results_flatten[i::num_columns] for i in range(num_columns)])\n        data = [headers]\n        data += [result for result in results_2d]\n        table = AsciiTable(data)\n        print_log(\n            'Classwise Panoptic Evaluation Results:\\n' + table.table,\n            logger=logger)\n"
  },
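  {
    "path": "examples/panoptic_id_decoding_sketch.py",
    "content": "# Hypothetical illustrative sketch, NOT part of the upstream MMDetection code base.\n# It only illustrates how the panoptic id maps consumed by\n# CocoPanopticDataset._pan2json are decoded: that method recovers the contiguous\n# semantic label with `pan_label % INSTANCE_OFFSET`, so a full id presumably packs\n# `semantic_label + instance_id * INSTANCE_OFFSET`. The offset value below is an\n# assumption; the dataset code imports INSTANCE_OFFSET from mmdet.core.\nimport numpy as np\n\nINSTANCE_OFFSET = 1000  # assumed to match mmdet.core.INSTANCE_OFFSET\n\n\ndef decode_panoptic_map(pan, num_classes):\n    # Split a panoptic id map into semantic labels, instance ids and a VOID mask.\n    # A semantic label equal to num_classes is treated as VOID, mirroring the\n    # check in CocoPanopticDataset._pan2json.\n    sem = pan % INSTANCE_OFFSET\n    inst = pan // INSTANCE_OFFSET\n    void_mask = sem == num_classes\n    return sem, inst, void_mask\n\n\nif __name__ == '__main__':\n    # Toy 2x3 map: two instances of thing class 0 and a stuff region of class 2.\n    pan = np.array([[0, 0, 2], [INSTANCE_OFFSET, INSTANCE_OFFSET, 2]])\n    sem, inst, void_mask = decode_panoptic_map(pan, num_classes=3)\n    print(sem)\n    print(inst)\n"
  },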
  {
    "path": "mmdet/datasets/custom.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport warnings\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\nfrom torch.utils.data import Dataset\n\nfrom mmdet.core import eval_map, eval_recalls\nfrom .builder import DATASETS\nfrom .pipelines import Compose\n\n\n@DATASETS.register_module()\nclass CustomDataset(Dataset):\n    \"\"\"Custom dataset for detection.\n\n    The annotation format is shown as follows. The `ann` field is optional for\n    testing.\n\n    .. code-block:: none\n\n        [\n            {\n                'filename': 'a.jpg',\n                'width': 1280,\n                'height': 720,\n                'ann': {\n                    'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order.\n                    'labels': <np.ndarray> (n, ),\n                    'bboxes_ignore': <np.ndarray> (k, 4), (optional field)\n                    'labels_ignore': <np.ndarray> (k, 4) (optional field)\n                }\n            },\n            ...\n        ]\n\n    Args:\n        ann_file (str): Annotation file path.\n        pipeline (list[dict]): Processing pipeline.\n        classes (str | Sequence[str], optional): Specify classes to load.\n            If is None, ``cls.CLASSES`` will be used. Default: None.\n        data_root (str, optional): Data root for ``ann_file``,\n            ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified.\n        test_mode (bool, optional): If set True, annotation will not be loaded.\n        filter_empty_gt (bool, optional): If set true, images without bounding\n            boxes of the dataset's classes will be filtered out. This option\n            only works when `test_mode=False`, i.e., we never filter images\n            during tests.\n    \"\"\"\n\n    CLASSES = None\n\n    PALETTE = None\n\n    def __init__(self,\n                 ann_file,\n                 pipeline,\n                 classes=None,\n                 data_root=None,\n                 img_prefix='',\n                 seg_prefix=None,\n                 seg_suffix='.png',\n                 proposal_file=None,\n                 test_mode=False,\n                 filter_empty_gt=True,\n                 file_client_args=dict(backend='disk')):\n        self.ann_file = ann_file\n        self.data_root = data_root\n        self.img_prefix = img_prefix\n        self.seg_prefix = seg_prefix\n        self.seg_suffix = seg_suffix\n        self.proposal_file = proposal_file\n        self.test_mode = test_mode\n        self.filter_empty_gt = filter_empty_gt\n        self.file_client = mmcv.FileClient(**file_client_args)\n        self.CLASSES = self.get_classes(classes)\n\n        # join paths if data_root is specified\n        if self.data_root is not None:\n            if not osp.isabs(self.ann_file):\n                self.ann_file = osp.join(self.data_root, self.ann_file)\n            if not (self.img_prefix is None or osp.isabs(self.img_prefix)):\n                self.img_prefix = osp.join(self.data_root, self.img_prefix)\n            if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)):\n                self.seg_prefix = osp.join(self.data_root, self.seg_prefix)\n            if not (self.proposal_file is None\n                    or osp.isabs(self.proposal_file)):\n                self.proposal_file = osp.join(self.data_root,\n                                              self.proposal_file)\n        # load annotations 
(and proposals)\n        if hasattr(self.file_client, 'get_local_path'):\n            with self.file_client.get_local_path(self.ann_file) as local_path:\n                self.data_infos = self.load_annotations(local_path)\n        else:\n            warnings.warn(\n                'The used MMCV version does not have get_local_path. '\n                f'We treat the {self.ann_file} as local paths and it '\n                'might cause errors if the path is not a local path. '\n                'Please use MMCV>= 1.3.16 if you meet errors.')\n            self.data_infos = self.load_annotations(self.ann_file)\n\n        if self.proposal_file is not None:\n            if hasattr(self.file_client, 'get_local_path'):\n                with self.file_client.get_local_path(\n                        self.proposal_file) as local_path:\n                    self.proposals = self.load_proposals(local_path)\n            else:\n                warnings.warn(\n                    'The used MMCV version does not have get_local_path. '\n                    f'We treat the {self.ann_file} as local paths and it '\n                    'might cause errors if the path is not a local path. '\n                    'Please use MMCV>= 1.3.16 if you meet errors.')\n                self.proposals = self.load_proposals(self.proposal_file)\n        else:\n            self.proposals = None\n\n        # filter images too small and containing no annotations\n        if not test_mode:\n            valid_inds = self._filter_imgs()\n            self.data_infos = [self.data_infos[i] for i in valid_inds]\n            if self.proposals is not None:\n                self.proposals = [self.proposals[i] for i in valid_inds]\n            # set group flag for the sampler\n            self._set_group_flag()\n\n        # processing pipeline\n        self.pipeline = Compose(pipeline)\n\n    def __len__(self):\n        \"\"\"Total number of samples of data.\"\"\"\n        return len(self.data_infos)\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from annotation file.\"\"\"\n        return mmcv.load(ann_file)\n\n    def load_proposals(self, proposal_file):\n        \"\"\"Load proposal from proposal file.\"\"\"\n        return mmcv.load(proposal_file)\n\n    def get_ann_info(self, idx):\n        \"\"\"Get annotation by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        return self.data_infos[idx]['ann']\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get category ids by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n\n        return self.data_infos[idx]['ann']['labels'].astype(np.int).tolist()\n\n    def pre_pipeline(self, results):\n        \"\"\"Prepare results dict for pipeline.\"\"\"\n        results['img_prefix'] = self.img_prefix\n        results['seg_prefix'] = self.seg_prefix\n        results['proposal_file'] = self.proposal_file\n        results['bbox_fields'] = []\n        results['mask_fields'] = []\n        results['seg_fields'] = []\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small.\"\"\"\n        if self.filter_empty_gt:\n            warnings.warn(\n                'CustomDataset does not support filtering empty gt images.')\n        valid_inds = []\n        for i, img_info in enumerate(self.data_infos):\n            if 
min(img_info['width'], img_info['height']) >= min_size:\n                valid_inds.append(i)\n        return valid_inds\n\n    def _set_group_flag(self):\n        \"\"\"Set flag according to image aspect ratio.\n\n        Images with aspect ratio greater than 1 will be set as group 1,\n        otherwise group 0.\n        \"\"\"\n        self.flag = np.zeros(len(self), dtype=np.uint8)\n        for i in range(len(self)):\n            img_info = self.data_infos[i]\n            if img_info['width'] / img_info['height'] > 1:\n                self.flag[i] = 1\n\n    def _rand_another(self, idx):\n        \"\"\"Get another random index from the same group as the given index.\"\"\"\n        pool = np.where(self.flag == self.flag[idx])[0]\n        return np.random.choice(pool)\n\n    def __getitem__(self, idx):\n        \"\"\"Get training/test data after pipeline.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Training/test data (with annotation if `test_mode` is set \\\n                True).\n        \"\"\"\n\n        if self.test_mode:\n            return self.prepare_test_img(idx)\n        while True:\n            data = self.prepare_train_img(idx)\n            if data is None:\n                idx = self._rand_another(idx)\n                continue\n            return data\n\n    def prepare_train_img(self, idx):\n        \"\"\"Get training data and annotations after pipeline.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Training data and annotation after pipeline with new keys \\\n                introduced by pipeline.\n        \"\"\"\n\n        img_info = self.data_infos[idx]\n        ann_info = self.get_ann_info(idx)\n        results = dict(img_info=img_info, ann_info=ann_info)\n        if self.proposals is not None:\n            results['proposals'] = self.proposals[idx]\n        self.pre_pipeline(results)\n        return self.pipeline(results)\n\n    def prepare_test_img(self, idx):\n        \"\"\"Get testing data after pipeline.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Testing data after pipeline with new keys introduced by \\\n                pipeline.\n        \"\"\"\n\n        img_info = self.data_infos[idx]\n        results = dict(img_info=img_info)\n        if self.proposals is not None:\n            results['proposals'] = self.proposals[idx]\n        self.pre_pipeline(results)\n        return self.pipeline(results)\n\n    @classmethod\n    def get_classes(cls, classes=None):\n        \"\"\"Get class names of current dataset.\n\n        Args:\n            classes (Sequence[str] | str | None): If classes is None, use\n                default CLASSES defined by builtin dataset. If classes is a\n                string, take it as a file name. The file contains the name of\n                classes where each line contains one class name. 
If classes is\n                a tuple or list, override the CLASSES defined by the dataset.\n\n        Returns:\n            tuple[str] or list[str]: Names of categories of the dataset.\n        \"\"\"\n        if classes is None:\n            return cls.CLASSES\n\n        if isinstance(classes, str):\n            # take it as a file path\n            class_names = mmcv.list_from_file(classes)\n        elif isinstance(classes, (tuple, list)):\n            class_names = classes\n        else:\n            raise ValueError(f'Unsupported type {type(classes)} of classes.')\n\n        return class_names\n\n    def get_cat2imgs(self):\n        \"\"\"Get a dict with class as key and img_ids as values, which will be\n        used in :class:`ClassAwareSampler`.\n\n        Returns:\n            dict[list]: A dict of per-label image list,\n            the item of the dict indicates a label index,\n            corresponds to the image index that contains the label.\n        \"\"\"\n        if self.CLASSES is None:\n            raise ValueError('self.CLASSES can not be None')\n        # sort the label index\n        cat2imgs = {i: [] for i in range(len(self.CLASSES))}\n        for i in range(len(self)):\n            cat_ids = set(self.get_cat_ids(i))\n            for cat in cat_ids:\n                cat2imgs[cat].append(i)\n        return cat2imgs\n\n    def format_results(self, results, **kwargs):\n        \"\"\"Place holder to format result to dataset specific output.\"\"\"\n\n    def evaluate(self,\n                 results,\n                 metric='mAP',\n                 logger=None,\n                 proposal_nums=(100, 300, 1000),\n                 iou_thr=0.5,\n                 scale_ranges=None):\n        \"\"\"Evaluate the dataset.\n\n        Args:\n            results (list): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated.\n            logger (logging.Logger | None | str): Logger used for printing\n                related information during evaluation. Default: None.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thr (float | list[float]): IoU threshold. 
Default: 0.5.\n            scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP.\n                Default: None.\n        \"\"\"\n\n        if not isinstance(metric, str):\n            assert len(metric) == 1\n            metric = metric[0]\n        allowed_metrics = ['mAP', 'recall']\n        if metric not in allowed_metrics:\n            raise KeyError(f'metric {metric} is not supported')\n        annotations = [self.get_ann_info(i) for i in range(len(self))]\n        eval_results = OrderedDict()\n        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr\n        if metric == 'mAP':\n            assert isinstance(iou_thrs, list)\n            mean_aps = []\n            for iou_thr in iou_thrs:\n                print_log(f'\\n{\"-\" * 15}iou_thr: {iou_thr}{\"-\" * 15}')\n                mean_ap, _ = eval_map(\n                    results,\n                    annotations,\n                    scale_ranges=scale_ranges,\n                    iou_thr=iou_thr,\n                    dataset=self.CLASSES,\n                    logger=logger)\n                mean_aps.append(mean_ap)\n                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)\n            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)\n        elif metric == 'recall':\n            gt_bboxes = [ann['bboxes'] for ann in annotations]\n            recalls = eval_recalls(\n                gt_bboxes, results, proposal_nums, iou_thr, logger=logger)\n            for i, num in enumerate(proposal_nums):\n                for j, iou in enumerate(iou_thrs):\n                    eval_results[f'recall@{num}@{iou}'] = recalls[i, j]\n            if recalls.shape[1] > 1:\n                ar = recalls.mean(axis=1)\n                for i, num in enumerate(proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n        return eval_results\n\n    def __repr__(self):\n        \"\"\"Print the number of instance number.\"\"\"\n        dataset_type = 'Test' if self.test_mode else 'Train'\n        result = (f'\\n{self.__class__.__name__} {dataset_type} dataset '\n                  f'with number of images {len(self)}, '\n                  f'and instance counts: \\n')\n        if self.CLASSES is None:\n            result += 'Category names are not provided. 
\\n'\n            return result\n        instance_count = np.zeros(len(self.CLASSES) + 1).astype(int)\n        # count the instance number in each image\n        for idx in range(len(self)):\n            label = self.get_ann_info(idx)['labels']\n            unique, counts = np.unique(label, return_counts=True)\n            if len(unique) > 0:\n                # add the occurrence number to each class\n                instance_count[unique] += counts\n            else:\n                # background is the last index\n                instance_count[-1] += 1\n        # create a table with category count\n        table_data = [['category', 'count'] * 5]\n        row_data = []\n        for cls, count in enumerate(instance_count):\n            if cls < len(self.CLASSES):\n                row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}']\n            else:\n                # add the background number\n                row_data += ['-1 background', f'{count}']\n            if len(row_data) == 10:\n                table_data.append(row_data)\n                row_data = []\n        if len(row_data) >= 2:\n            if row_data[-1] == '0':\n                row_data = row_data[:-2]\n            if len(row_data) >= 2:\n                table_data.append([])\n                table_data.append(row_data)\n\n        table = AsciiTable(table_data)\n        result += table.table\n        return result\n"
  },
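  {
    "path": "examples/custom_dataset_middle_format_sketch.py",
    "content": "# Hypothetical illustrative sketch, NOT part of the upstream MMDetection code base.\n# It shows one way to build the middle-format annotation list documented in the\n# CustomDataset class docstring, dump it with mmcv and load it back through\n# CustomDataset. The file names and the one-class list are made up; the sketch\n# assumes mmdet and mmcv are installed, and keeps the pipeline empty so that no\n# image files need to exist on disk.\nimport mmcv\nimport numpy as np\n\nfrom mmdet.datasets import CustomDataset\n\nann_list = [\n    dict(\n        filename='a.jpg',\n        width=1280,\n        height=720,\n        ann=dict(\n            bboxes=np.array([[10, 20, 200, 300]], dtype=np.float32),\n            labels=np.array([0], dtype=np.int64))),\n]\n\n# mmcv.dump picks the serializer from the file suffix; .pkl keeps the ndarrays.\nmmcv.dump(ann_list, 'toy_middle_format.pkl')  # hypothetical output path\n\ndataset = CustomDataset(\n    ann_file='toy_middle_format.pkl',\n    pipeline=[],  # empty pipeline: only annotation loading is exercised\n    classes=('cat', ),  # made-up class list\n    img_prefix='')\nprint(len(dataset), dataset.get_ann_info(0)['bboxes'])\n"
  },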
  {
    "path": "mmdet/datasets/dataset_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport bisect\nimport collections\nimport copy\nimport math\nfrom collections import defaultdict\n\nimport numpy as np\nfrom mmcv.utils import build_from_cfg, print_log\nfrom torch.utils.data.dataset import ConcatDataset as _ConcatDataset\n\nfrom .builder import DATASETS, PIPELINES\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass ConcatDataset(_ConcatDataset):\n    \"\"\"A wrapper of concatenated dataset.\n\n    Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but\n    concat the group flag for image aspect ratio.\n\n    Args:\n        datasets (list[:obj:`Dataset`]): A list of datasets.\n        separate_eval (bool): Whether to evaluate the results\n            separately if it is used as validation dataset.\n            Defaults to True.\n    \"\"\"\n\n    def __init__(self, datasets, separate_eval=True):\n        super(ConcatDataset, self).__init__(datasets)\n        self.CLASSES = datasets[0].CLASSES\n        self.PALETTE = getattr(datasets[0], 'PALETTE', None)\n        self.separate_eval = separate_eval\n        if not separate_eval:\n            if any([isinstance(ds, CocoDataset) for ds in datasets]):\n                raise NotImplementedError(\n                    'Evaluating concatenated CocoDataset as a whole is not'\n                    ' supported! Please set \"separate_eval=True\"')\n            elif len(set([type(ds) for ds in datasets])) != 1:\n                raise NotImplementedError(\n                    'All the datasets should have same types')\n\n        if hasattr(datasets[0], 'flag'):\n            flags = []\n            for i in range(0, len(datasets)):\n                flags.append(datasets[i].flag)\n            self.flag = np.concatenate(flags)\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get category ids of concatenated dataset by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n\n        if idx < 0:\n            if -idx > len(self):\n                raise ValueError(\n                    'absolute value of index should not exceed dataset length')\n            idx = len(self) + idx\n        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)\n        if dataset_idx == 0:\n            sample_idx = idx\n        else:\n            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]\n        return self.datasets[dataset_idx].get_cat_ids(sample_idx)\n\n    def get_ann_info(self, idx):\n        \"\"\"Get annotation of concatenated dataset by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        if idx < 0:\n            if -idx > len(self):\n                raise ValueError(\n                    'absolute value of index should not exceed dataset length')\n            idx = len(self) + idx\n        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)\n        if dataset_idx == 0:\n            sample_idx = idx\n        else:\n            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]\n        return self.datasets[dataset_idx].get_ann_info(sample_idx)\n\n    def evaluate(self, results, logger=None, **kwargs):\n        \"\"\"Evaluate the results.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            logger (logging.Logger | str | None): Logger used for printing\n 
               related information during evaluation. Default: None.\n\n        Returns:\n            dict[str: float]: AP results of the total dataset or each separate\n            dataset if `self.separate_eval=True`.\n        \"\"\"\n        assert len(results) == self.cumulative_sizes[-1], \\\n            ('Dataset and results have different sizes: '\n             f'{self.cumulative_sizes[-1]} v.s. {len(results)}')\n\n        # Check whether all the datasets support evaluation\n        for dataset in self.datasets:\n            assert hasattr(dataset, 'evaluate'), \\\n                f'{type(dataset)} does not implement evaluate function'\n\n        if self.separate_eval:\n            dataset_idx = -1\n            total_eval_results = dict()\n            for size, dataset in zip(self.cumulative_sizes, self.datasets):\n                start_idx = 0 if dataset_idx == -1 else \\\n                    self.cumulative_sizes[dataset_idx]\n                end_idx = self.cumulative_sizes[dataset_idx + 1]\n\n                results_per_dataset = results[start_idx:end_idx]\n                print_log(\n                    f'\\nEvaluating {dataset.ann_file} with '\n                    f'{len(results_per_dataset)} images now',\n                    logger=logger)\n\n                eval_results_per_dataset = dataset.evaluate(\n                    results_per_dataset, logger=logger, **kwargs)\n                dataset_idx += 1\n                for k, v in eval_results_per_dataset.items():\n                    total_eval_results.update({f'{dataset_idx}_{k}': v})\n\n            return total_eval_results\n        elif any([isinstance(ds, CocoDataset) for ds in self.datasets]):\n            raise NotImplementedError(\n                'Evaluating concatenated CocoDataset as a whole is not'\n                ' supported! Please set \"separate_eval=True\"')\n        elif len(set([type(ds) for ds in self.datasets])) != 1:\n            raise NotImplementedError(\n                'All the datasets should have same types')\n        else:\n            original_data_infos = self.datasets[0].data_infos\n            self.datasets[0].data_infos = sum(\n                [dataset.data_infos for dataset in self.datasets], [])\n            eval_results = self.datasets[0].evaluate(\n                results, logger=logger, **kwargs)\n            self.datasets[0].data_infos = original_data_infos\n            return eval_results\n\n\n@DATASETS.register_module()\nclass RepeatDataset:\n    \"\"\"A wrapper of repeated dataset.\n\n    The length of repeated dataset will be `times` larger than the original\n    dataset. This is useful when the data loading time is long but the dataset\n    is small. 
Using RepeatDataset can reduce the data loading time between\n    epochs.\n\n    Args:\n        dataset (:obj:`Dataset`): The dataset to be repeated.\n        times (int): Repeat times.\n    \"\"\"\n\n    def __init__(self, dataset, times):\n        self.dataset = dataset\n        self.times = times\n        self.CLASSES = dataset.CLASSES\n        self.PALETTE = getattr(dataset, 'PALETTE', None)\n        if hasattr(self.dataset, 'flag'):\n            self.flag = np.tile(self.dataset.flag, times)\n\n        self._ori_len = len(self.dataset)\n\n    def __getitem__(self, idx):\n        return self.dataset[idx % self._ori_len]\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get category ids of repeat dataset by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n\n        return self.dataset.get_cat_ids(idx % self._ori_len)\n\n    def get_ann_info(self, idx):\n        \"\"\"Get annotation of repeat dataset by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        return self.dataset.get_ann_info(idx % self._ori_len)\n\n    def __len__(self):\n        \"\"\"Length after repetition.\"\"\"\n        return self.times * self._ori_len\n\n\n# Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa\n@DATASETS.register_module()\nclass ClassBalancedDataset:\n    \"\"\"A wrapper of repeated dataset with repeat factor.\n\n    Suitable for training on class imbalanced datasets like LVIS. Following\n    the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_,\n    in each epoch, an image may appear multiple times based on its\n    \"repeat factor\".\n    The repeat factor for an image is a function of the frequency the rarest\n    category labeled in that image. The \"frequency of category c\" in [0, 1]\n    is defined by the fraction of images in the training set (without repeats)\n    in which category c appears.\n    The dataset needs to instantiate :func:`self.get_cat_ids` to support\n    ClassBalancedDataset.\n\n    The repeat factor is computed as followed.\n\n    1. For each category c, compute the fraction # of images\n       that contain it: :math:`f(c)`\n    2. For each category c, compute the category-level repeat factor:\n       :math:`r(c) = max(1, sqrt(t/f(c)))`\n    3. For each image I, compute the image-level repeat factor:\n       :math:`r(I) = max_{c in I} r(c)`\n\n    Args:\n        dataset (:obj:`CustomDataset`): The dataset to be repeated.\n        oversample_thr (float): frequency threshold below which data is\n            repeated. For categories with ``f_c >= oversample_thr``, there is\n            no oversampling. For categories with ``f_c < oversample_thr``, the\n            degree of oversampling following the square-root inverse frequency\n            heuristic above.\n        filter_empty_gt (bool, optional): If set true, images without bounding\n            boxes will not be oversampled. 
Otherwise, they will be categorized\n            as the pure background class and involved into the oversampling.\n            Default: True.\n    \"\"\"\n\n    def __init__(self, dataset, oversample_thr, filter_empty_gt=True):\n        self.dataset = dataset\n        self.oversample_thr = oversample_thr\n        self.filter_empty_gt = filter_empty_gt\n        self.CLASSES = dataset.CLASSES\n        self.PALETTE = getattr(dataset, 'PALETTE', None)\n\n        repeat_factors = self._get_repeat_factors(dataset, oversample_thr)\n        repeat_indices = []\n        for dataset_idx, repeat_factor in enumerate(repeat_factors):\n            repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor))\n        self.repeat_indices = repeat_indices\n\n        flags = []\n        if hasattr(self.dataset, 'flag'):\n            for flag, repeat_factor in zip(self.dataset.flag, repeat_factors):\n                flags.extend([flag] * int(math.ceil(repeat_factor)))\n            assert len(flags) == len(repeat_indices)\n        self.flag = np.asarray(flags, dtype=np.uint8)\n\n    def _get_repeat_factors(self, dataset, repeat_thr):\n        \"\"\"Get repeat factor for each images in the dataset.\n\n        Args:\n            dataset (:obj:`CustomDataset`): The dataset\n            repeat_thr (float): The threshold of frequency. If an image\n                contains the categories whose frequency below the threshold,\n                it would be repeated.\n\n        Returns:\n            list[float]: The repeat factors for each images in the dataset.\n        \"\"\"\n\n        # 1. For each category c, compute the fraction # of images\n        #   that contain it: f(c)\n        category_freq = defaultdict(int)\n        num_images = len(dataset)\n        for idx in range(num_images):\n            cat_ids = set(self.dataset.get_cat_ids(idx))\n            if len(cat_ids) == 0 and not self.filter_empty_gt:\n                cat_ids = set([len(self.CLASSES)])\n            for cat_id in cat_ids:\n                category_freq[cat_id] += 1\n        for k, v in category_freq.items():\n            category_freq[k] = v / num_images\n\n        # 2. For each category c, compute the category-level repeat factor:\n        #    r(c) = max(1, sqrt(t/f(c)))\n        category_repeat = {\n            cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))\n            for cat_id, cat_freq in category_freq.items()\n        }\n\n        # 3. 
For each image I, compute the image-level repeat factor:\n        #    r(I) = max_{c in I} r(c)\n        repeat_factors = []\n        for idx in range(num_images):\n            cat_ids = set(self.dataset.get_cat_ids(idx))\n            if len(cat_ids) == 0 and not self.filter_empty_gt:\n                cat_ids = set([len(self.CLASSES)])\n            repeat_factor = 1\n            if len(cat_ids) > 0:\n                repeat_factor = max(\n                    {category_repeat[cat_id]\n                     for cat_id in cat_ids})\n            repeat_factors.append(repeat_factor)\n\n        return repeat_factors\n\n    def __getitem__(self, idx):\n        ori_index = self.repeat_indices[idx]\n        return self.dataset[ori_index]\n\n    def get_ann_info(self, idx):\n        \"\"\"Get annotation of dataset by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n        ori_index = self.repeat_indices[idx]\n        return self.dataset.get_ann_info(ori_index)\n\n    def __len__(self):\n        \"\"\"Length after repetition.\"\"\"\n        return len(self.repeat_indices)\n\n\n@DATASETS.register_module()\nclass MultiImageMixDataset:\n    \"\"\"A wrapper of multiple images mixed dataset.\n\n    Suitable for training on multiple images mixed data augmentation like\n    mosaic and mixup. For the augmentation pipeline of mixed image data,\n    the `get_indexes` method needs to be provided to obtain the image\n    indexes, and you can set `skip_flags` to change the pipeline running\n    process. At the same time, we provide the `dynamic_scale` parameter\n    to dynamically change the output image size.\n\n    Args:\n        dataset (:obj:`CustomDataset`): The dataset to be mixed.\n        pipeline (Sequence[dict]): Sequence of transform object or\n            config dict to be composed.\n        dynamic_scale (tuple[int], optional): The image scale can be changed\n            dynamically. Default to None. It is deprecated.\n        skip_type_keys (list[str], optional): Sequence of type string to\n            be skip pipeline. Default to None.\n        max_refetch (int): The maximum number of retry iterations for getting\n            valid results from the pipeline. If the number of iterations is\n            greater than `max_refetch`, but results is still None, then the\n            iteration is terminated and raise the error. Default: 15.\n    \"\"\"\n\n    def __init__(self,\n                 dataset,\n                 pipeline,\n                 dynamic_scale=None,\n                 skip_type_keys=None,\n                 max_refetch=15):\n        if dynamic_scale is not None:\n            raise RuntimeError(\n                'dynamic_scale is deprecated. 
Please use Resize pipeline '\n                'to achieve similar functions')\n        assert isinstance(pipeline, collections.abc.Sequence)\n        if skip_type_keys is not None:\n            assert all([\n                isinstance(skip_type_key, str)\n                for skip_type_key in skip_type_keys\n            ])\n        self._skip_type_keys = skip_type_keys\n\n        self.pipeline = []\n        self.pipeline_types = []\n        for transform in pipeline:\n            if isinstance(transform, dict):\n                self.pipeline_types.append(transform['type'])\n                transform = build_from_cfg(transform, PIPELINES)\n                self.pipeline.append(transform)\n            else:\n                raise TypeError('pipeline must be a dict')\n\n        self.dataset = dataset\n        self.CLASSES = dataset.CLASSES\n        self.PALETTE = getattr(dataset, 'PALETTE', None)\n        if hasattr(self.dataset, 'flag'):\n            self.flag = dataset.flag\n        self.num_samples = len(dataset)\n        self.max_refetch = max_refetch\n\n    def __len__(self):\n        return self.num_samples\n\n    def __getitem__(self, idx):\n        results = copy.deepcopy(self.dataset[idx])\n        for (transform, transform_type) in zip(self.pipeline,\n                                               self.pipeline_types):\n            if self._skip_type_keys is not None and \\\n                    transform_type in self._skip_type_keys:\n                continue\n\n            if hasattr(transform, 'get_indexes'):\n                for i in range(self.max_refetch):\n                    # Make sure the results passed the loading pipeline\n                    # of the original dataset is not None.\n                    indexes = transform.get_indexes(self.dataset)\n                    if not isinstance(indexes, collections.abc.Sequence):\n                        indexes = [indexes]\n                    mix_results = [\n                        copy.deepcopy(self.dataset[index]) for index in indexes\n                    ]\n                    if None not in mix_results:\n                        results['mix_results'] = mix_results\n                        break\n                else:\n                    raise RuntimeError(\n                        'The loading pipeline of the original dataset'\n                        ' always returns None. Please check the correctness '\n                        'of the dataset and its pipeline.')\n\n            for i in range(self.max_refetch):\n                # To confirm the results passed the training pipeline\n                # of the wrapper is not None.\n                updated_results = transform(copy.deepcopy(results))\n                if updated_results is not None:\n                    results = updated_results\n                    break\n            else:\n                raise RuntimeError(\n                    'The training pipeline of the dataset wrapper'\n                    ' always returns None. Please check the correctness '\n                    'of the dataset and its pipeline.')\n\n            if 'mix_results' in results:\n                results.pop('mix_results')\n\n        return results\n\n    def update_skip_type_keys(self, skip_type_keys):\n        \"\"\"Update skip_type_keys. 
It is called by an external hook.\n\n        Args:\n            skip_type_keys (list[str], optional): Sequence of type\n                string to be skip pipeline.\n        \"\"\"\n        assert all([\n            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys\n        ])\n        self._skip_type_keys = skip_type_keys\n"
  },
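  {
    "path": "examples/class_balanced_repeat_factor_sketch.py",
    "content": "# Hypothetical illustrative sketch, NOT part of the upstream MMDetection code base.\n# It reproduces, on a toy input, the repeat-factor arithmetic described in the\n# ClassBalancedDataset docstring: f(c) is the fraction of images containing\n# category c, r(c) = max(1, sqrt(t / f(c))) and r(I) = max_{c in I} r(c). The\n# per-image category-id lists below are made up.\nimport math\nfrom collections import defaultdict\n\n\ndef repeat_factors(per_image_cat_ids, repeat_thr):\n    # 1. f(c): fraction of images that contain category c.\n    num_images = len(per_image_cat_ids)\n    category_freq = defaultdict(int)\n    for cat_ids in per_image_cat_ids:\n        for cat_id in set(cat_ids):\n            category_freq[cat_id] += 1\n    category_freq = {k: v / num_images for k, v in category_freq.items()}\n\n    # 2. r(c) = max(1, sqrt(t / f(c))).\n    category_repeat = {\n        cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))\n        for cat_id, cat_freq in category_freq.items()\n    }\n\n    # 3. r(I) = max_{c in I} r(c); empty images are simply kept once here.\n    return [\n        max(category_repeat[c] for c in set(cat_ids)) if cat_ids else 1.0\n        for cat_ids in per_image_cat_ids\n    ]\n\n\nif __name__ == '__main__':\n    # Category 1 appears in 1 of 4 images, so those images get oversampled.\n    print(repeat_factors([[0], [0], [0], [0, 1]], repeat_thr=0.5))\n"
  },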
  {
    "path": "mmdet/datasets/deepfashion.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass DeepFashionDataset(CocoDataset):\n\n    CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag',\n               'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair',\n               'skin', 'face')\n\n    PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64),\n               (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96),\n               (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192),\n               (128, 0, 96), (128, 0, 192), (0, 32, 192)]\n"
  },
  {
    "path": "mmdet/datasets/lvis.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport logging\nimport os.path as osp\nimport tempfile\nimport warnings\nfrom collections import OrderedDict\n\nimport numpy as np\nfrom mmcv.utils import print_log\nfrom terminaltables import AsciiTable\n\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS.register_module()\nclass LVISV05Dataset(CocoDataset):\n\n    CLASSES = (\n        'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock',\n        'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet',\n        'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron',\n        'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke',\n        'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award',\n        'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack',\n        'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball',\n        'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage',\n        'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel',\n        'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat',\n        'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop',\n        'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel',\n        'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead',\n        'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed',\n        'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can',\n        'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench',\n        'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars',\n        'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse',\n        'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag',\n        'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp',\n        'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin',\n        'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet',\n        'book', 'book_bag', 'bookcase', 'booklet', 'bookmark',\n        'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet',\n        'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl',\n        'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin',\n        'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',\n        'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase',\n        'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie',\n        'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull',\n        'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board',\n        'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed',\n        'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife',\n        'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',\n        'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',\n        'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',\n        'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder',\n        'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon',\n        'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap',\n        'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)',\n        'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan',\n        'cargo_ship', 'carnation', 
'horse_carriage', 'carrot', 'tote_bag',\n        'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast',\n        'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player',\n        'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue',\n        'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard',\n        'cherry', 'chessboard', 'chest_of_drawers_(furniture)',\n        'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua',\n        'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)',\n        'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk',\n        'chocolate_mousse', 'choker', 'chopping_board', 'chopstick',\n        'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette',\n        'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent',\n        'clementine', 'clip', 'clipboard', 'clock', 'clock_tower',\n        'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat',\n        'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter',\n        'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin',\n        'colander', 'coleslaw', 'coloring_material', 'combination_lock',\n        'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer',\n        'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie',\n        'cookie_jar', 'cooking_utensil', 'cooler_(for_food)',\n        'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn',\n        'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset',\n        'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell',\n        'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon',\n        'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot',\n        'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship',\n        'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube',\n        'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler',\n        'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool',\n        'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard',\n        'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n        'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',\n        'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n        'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog',\n        'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask',\n        'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',\n        'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',\n        'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper',\n        'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling',\n        'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan',\n        'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel',\n        'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',\n        'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',\n        'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',\n        'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',\n        'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',\n        'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',\n        'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat',\n        'fishing_rod', 'flag', 'flagpole', 'flamingo', 
'flannel', 'flash',\n        'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)',\n        'flower_arrangement', 'flute_glass', 'foal', 'folding_chair',\n        'food_processor', 'football_(American)', 'football_helmet',\n        'footstool', 'fork', 'forklift', 'freight_car', 'French_toast',\n        'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad',\n        'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',\n        'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',\n        'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda',\n        'gift_wrap', 'ginger', 'giraffe', 'cincture',\n        'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',\n        'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose',\n        'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater',\n        'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle',\n        'grillroom', 'grinder_(tool)', 'grits', 'grizzly', 'grocery_bag',\n        'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush',\n        'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock',\n        'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',\n        'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',\n        'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil',\n        'headband', 'headboard', 'headlight', 'headscarf', 'headset',\n        'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater',\n        'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus',\n        'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood',\n        'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n        'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n        'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n        'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod',\n        'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean',\n        'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick',\n        'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard',\n        'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten',\n        'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)',\n        'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat',\n        'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp',\n        'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer',\n        'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)',\n        'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy',\n        'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine',\n        'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard',\n        'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion',\n        'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine',\n        'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth',\n        'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini',\n        'mascot', 'mashed_potato', 'masher', 'mask', 'mast',\n        'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup',\n        'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone',\n        'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan',\n        'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money',\n        
'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',\n        'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle',\n        'mound_(baseball)', 'mouse_(animal_rodent)',\n        'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',\n        'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin',\n        'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand',\n        'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)',\n        'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)',\n        'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion',\n        'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman',\n        'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle',\n        'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette',\n        'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose',\n        'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book',\n        'paperweight', 'parachute', 'parakeet', 'parasail_(sports)',\n        'parchment', 'parka', 'parking_meter', 'parrot',\n        'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n        'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n        'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard',\n        'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener',\n        'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper',\n        'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood',\n        'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n        'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n        'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n        'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n        'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n        'plate', 'platter', 'playing_card', 'playpen', 'pliers',\n        'plow_(farm_equipment)', 'pocket_watch', 'pocketknife',\n        'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt',\n        'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait',\n        'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',\n        'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer',\n        'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding',\n        'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet',\n        'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car',\n        'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft',\n        'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',\n        'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n        'recliner', 'record_player', 'red_cabbage', 'reflector',\n        'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring',\n        'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate',\n        'Rollerblade', 'rolling_pin', 'root_beer',\n        'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)',\n        'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag',\n        'safety_pin', 'sail', 'salad', 'salad_plate', 'salami',\n        'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker',\n        'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer',\n        'sausage', 'sawhorse', 'saxophone', 
'scale_(measuring_instrument)',\n        'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard',\n        'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver',\n        'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n        'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker',\n        'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)',\n        'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog',\n        'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart',\n        'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head',\n        'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo',\n        'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka',\n        'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)',\n        'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',\n        'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain',\n        'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero',\n        'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk',\n        'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear',\n        'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear',\n        'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish',\n        'statue_(sculpture)', 'steak_(food)', 'steak_knife',\n        'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil',\n        'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',\n        'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light',\n        'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry',\n        'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer',\n        'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower',\n        'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop',\n        'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato',\n        'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table',\n        'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag',\n        'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)',\n        'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',\n        'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',\n        'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n        'telephone_pole', 'telephoto_lens', 'television_camera',\n        'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n        'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n        'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',\n        'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',\n        'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',\n        'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',\n        'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',\n        'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',\n        'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)',\n        'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)',\n        'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip',\n        'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella',\n        'underwear', 'unicycle', 'urinal', 'urn', 
'vacuum_cleaner', 'valve',\n        'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin',\n        'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon',\n        'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet',\n        'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch',\n        'water_bottle', 'water_cooler', 'water_faucet', 'water_filter',\n        'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski',\n        'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam',\n        'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair',\n        'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime',\n        'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock',\n        'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair',\n        'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath',\n        'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt',\n        'yoke_(animal_equipment)', 'zebra', 'zucchini')\n\n    PALETTE = None\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from lvis style annotation file.\n\n        Args:\n            ann_file (str): Path of annotation file.\n\n        Returns:\n            list[dict]: Annotation info from LVIS api.\n        \"\"\"\n\n        try:\n            import lvis\n            if getattr(lvis, '__version__', '0') >= '10.5.3':\n                warnings.warn(\n                    'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n                    UserWarning)\n            from lvis import LVIS\n        except ImportError:\n            raise ImportError(\n                'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".'  # noqa: E501\n            )\n        self.coco = LVIS(ann_file)\n        self.cat_ids = self.coco.get_cat_ids()\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            if info['file_name'].startswith('COCO'):\n                # Convert form the COCO 2014 file naming convention of\n                # COCO_[train/val/test]2014_000000000000.jpg to the 2017\n                # naming convention of 000000000000.jpg\n                # (LVIS v1 will fix this naming issue)\n                info['filename'] = info['file_name'][-16:]\n            else:\n                info['filename'] = info['file_name']\n            data_infos.append(info)\n        return data_infos\n\n    def evaluate(self,\n                 results,\n                 metric='bbox',\n                 logger=None,\n                 jsonfile_prefix=None,\n                 classwise=False,\n                 proposal_nums=(100, 300, 1000),\n                 iou_thrs=np.arange(0.5, 0.96, 0.05)):\n        \"\"\"Evaluation in LVIS protocol.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. Options are\n                'bbox', 'segm', 'proposal', 'proposal_fast'.\n            logger (logging.Logger | str | None): Logger used for printing\n                related information during evaluation. 
Default: None.\n            jsonfile_prefix (str | None): The prefix of output json files,\n                including the file path and the filename prefix, e.g.,\n                'a/b/prefix'. If not specified, a temp file will be\n                created. Default: None.\n            classwise (bool): Whether to evaluate the AP for each class.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thrs (Sequence[float]): IoU threshold used for evaluating\n                recalls. If set to a list, the average recall of all IoUs will\n                also be computed. Default: np.arange(0.5, 0.96, 0.05).\n\n        Returns:\n            dict[str, float]: LVIS style metrics.\n        \"\"\"\n\n        try:\n            import lvis\n            if getattr(lvis, '__version__', '0') >= '10.5.3':\n                warnings.warn(\n                    'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n                    UserWarning)\n            from lvis import LVISEval, LVISResults\n        except ImportError:\n            raise ImportError(\n                'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".'  # noqa: E501\n            )\n        assert isinstance(results, list), 'results must be a list'\n        assert len(results) == len(self), (\n            'The length of results is not equal to the dataset len: {} != {}'.\n            format(len(results), len(self)))\n\n        metrics = metric if isinstance(metric, list) else [metric]\n        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']\n        for metric in metrics:\n            if metric not in allowed_metrics:\n                raise KeyError('metric {} is not supported'.format(metric))\n\n        if jsonfile_prefix is None:\n            tmp_dir = tempfile.TemporaryDirectory()\n            jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n        else:\n            tmp_dir = None\n        result_files = self.results2json(results, jsonfile_prefix)\n\n        eval_results = OrderedDict()\n        # get original api\n        lvis_gt = self.coco\n        for metric in metrics:\n            msg = 'Evaluating {}...'.format(metric)\n            if logger is None:\n                msg = '\\n' + msg\n            print_log(msg, logger=logger)\n\n            if metric == 'proposal_fast':\n                ar = self.fast_eval_recall(\n                    results, proposal_nums, iou_thrs, logger='silent')\n                log_msg = []\n                for i, num in enumerate(proposal_nums):\n                    eval_results['AR@{}'.format(num)] = ar[i]\n                    log_msg.append('\\nAR@{}\\t{:.4f}'.format(num, ar[i]))\n                log_msg = ''.join(log_msg)\n                print_log(log_msg, logger=logger)\n                continue\n\n            if metric not in result_files:\n                raise KeyError('{} is not in results'.format(metric))\n            try:\n                lvis_dt = LVISResults(lvis_gt, result_files[metric])\n            except IndexError:\n                print_log(\n                    'The testing results of the whole dataset is empty.',\n                    logger=logger,\n                    level=logging.ERROR)\n                break\n\n            iou_type = 'bbox' if metric == 'proposal' else metric\n            lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)\n            lvis_eval.params.imgIds = self.img_ids\n            if metric == 'proposal':\n                lvis_eval.params.useCats = 0\n                
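# Proposal evaluation is class-agnostic (useCats=0). AR is\n                # reported for each requested proposal budget via maxDets;\n                # the AR* entries of get_results() are collected below.\n                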
lvis_eval.params.maxDets = list(proposal_nums)\n                lvis_eval.evaluate()\n                lvis_eval.accumulate()\n                lvis_eval.summarize()\n                for k, v in lvis_eval.get_results().items():\n                    if k.startswith('AR'):\n                        val = float('{:.4f}'.format(float(v)))\n                        eval_results[k] = val\n            else:\n                lvis_eval.evaluate()\n                lvis_eval.accumulate()\n                lvis_eval.summarize()\n                lvis_results = lvis_eval.get_results()\n                if classwise:  # Compute per-category AP\n                    # Compute per-category AP\n                    # from https://github.com/facebookresearch/detectron2/\n                    precisions = lvis_eval.eval['precision']\n                    # precision: (iou, recall, cls, area range, max dets)\n                    assert len(self.cat_ids) == precisions.shape[2]\n\n                    results_per_category = []\n                    for idx, catId in enumerate(self.cat_ids):\n                        # area range index 0: all area ranges\n                        # max dets index -1: typically 100 per image\n                        # the dimensions of precisions are\n                        # [num_thrs, num_recalls, num_cats, num_area_rngs]\n                        nm = self.coco.load_cats([catId])[0]\n                        precision = precisions[:, :, idx, 0]\n                        precision = precision[precision > -1]\n                        if precision.size:\n                            ap = np.mean(precision)\n                        else:\n                            ap = float('nan')\n                        results_per_category.append(\n                            (f'{nm[\"name\"]}', f'{float(ap):0.3f}'))\n\n                    num_columns = min(6, len(results_per_category) * 2)\n                    results_flatten = list(\n                        itertools.chain(*results_per_category))\n                    headers = ['category', 'AP'] * (num_columns // 2)\n                    results_2d = itertools.zip_longest(*[\n                        results_flatten[i::num_columns]\n                        for i in range(num_columns)\n                    ])\n                    table_data = [headers]\n                    table_data += [result for result in results_2d]\n                    table = AsciiTable(table_data)\n                    print_log('\\n' + table.table, logger=logger)\n\n                for k, v in lvis_results.items():\n                    if k.startswith('AP'):\n                        key = '{}_{}'.format(metric, k)\n                        val = float('{:.4f}'.format(float(v)))\n                        eval_results[key] = val\n                ap_summary = ' '.join([\n                    '{}:{:.4f}'.format(k, float(v))\n                    for k, v in lvis_results.items() if k.startswith('AP')\n                ])\n                eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary\n            lvis_eval.print_results()\n        if tmp_dir is not None:\n            tmp_dir.cleanup()\n        return eval_results\n\n\nLVISDataset = LVISV05Dataset\nDATASETS.register_module(name='LVISDataset', module=LVISDataset)\n\n\n@DATASETS.register_module()\nclass LVISV1Dataset(LVISDataset):\n\n    CLASSES = (\n        'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol',\n        'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna',\n        'apple', 
'applesauce', 'apricot', 'apron', 'aquarium',\n        'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor',\n        'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer',\n        'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy',\n        'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel',\n        'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon',\n        'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo',\n        'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow',\n        'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap',\n        'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)',\n        'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)',\n        'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie',\n        'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper',\n        'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt',\n        'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor',\n        'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath',\n        'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card',\n        'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket',\n        'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry',\n        'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg',\n        'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase',\n        'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle',\n        'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)',\n        'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box',\n        'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere',\n        'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase',\n        'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts',\n        'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer',\n        'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn',\n        'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card',\n        'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car',\n        'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf',\n        'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)',\n        'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar',\n        'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup',\n        'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino',\n        'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car',\n        'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship',\n        'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton',\n        'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower',\n        'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone',\n        'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier',\n        'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard',\n        'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime',\n        'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar',\n        'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker',\n        'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider',\n        'cigar_box', 'cigarette', 'cigarette_case', 
'cistern', 'clarinet',\n        'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine',\n        'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock',\n        'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster',\n        'coat', 'coat_hanger', 'coatrack', 'cock', 'cockroach',\n        'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table',\n        'coffeepot', 'coil', 'coin', 'colander', 'coleslaw',\n        'coloring_material', 'combination_lock', 'pacifier', 'comic_book',\n        'compass', 'computer_keyboard', 'condiment', 'cone', 'control',\n        'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie',\n        'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)',\n        'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet',\n        'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall',\n        'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker',\n        'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib',\n        'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown',\n        'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch',\n        'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup',\n        'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain',\n        'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard',\n        'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk',\n        'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux',\n        'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher',\n        'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup',\n        'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin',\n        'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly',\n        'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit',\n        'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)',\n        'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell',\n        'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring',\n        'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater',\n        'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk',\n        'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan',\n        'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)',\n        'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm',\n        'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace',\n        'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl',\n        'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap',\n        'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)',\n        'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal',\n        'folding_chair', 'food_processor', 'football_(American)',\n        'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car',\n        'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice',\n        'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage',\n        'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic',\n        'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator',\n        'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture',\n        'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles',\n        'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 
'goose',\n        'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat',\n        'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly',\n        'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet',\n        'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock',\n        'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel',\n        'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw',\n        'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband',\n        'headboard', 'headlight', 'headscarf', 'headset',\n        'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet',\n        'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog',\n        'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'hookah',\n        'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce',\n        'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear',\n        'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate',\n        'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board',\n        'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey',\n        'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak',\n        'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono',\n        'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit',\n        'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)',\n        'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)',\n        'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard',\n        'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather',\n        'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce',\n        'license_plate', 'life_buoy', 'life_jacket', 'lightbulb',\n        'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor',\n        'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat',\n        'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)',\n        'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger',\n        'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato',\n        'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox',\n        'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine',\n        'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone',\n        'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror',\n        'mitten', 'mixer_(kitchen_tool)', 'money',\n        'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor',\n        'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)',\n        'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom',\n        'music_stool', 'musical_instrument', 'nailfile', 'napkin',\n        'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper',\n        'newsstand', 'nightshirt', 'nosebag_(for_animals)',\n        'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker',\n        'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil',\n        'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich',\n        'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad',\n        'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas',\n        'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake',\n        'pantyhose', 'papaya', 
'paper_plate', 'paper_towel', 'paperback_book',\n        'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol',\n        'parchment', 'parka', 'parking_meter', 'parrot',\n        'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport',\n        'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter',\n        'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg',\n        'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box',\n        'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)',\n        'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet',\n        'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano',\n        'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow',\n        'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball',\n        'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)',\n        'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat',\n        'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)',\n        'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)',\n        'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)',\n        'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato',\n        'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel',\n        'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune',\n        'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher',\n        'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit',\n        'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish',\n        'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat',\n        'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt',\n        'recliner', 'record_player', 'reflector', 'remote_control',\n        'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map',\n        'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade',\n        'rolling_pin', 'root_beer', 'router_(computer_equipment)',\n        'rubber_band', 'runner_(carpet)', 'plastic_bag',\n        'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin',\n        'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)',\n        'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)',\n        'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse',\n        'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf',\n        'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver',\n        'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane',\n        'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark',\n        'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl',\n        'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt',\n        'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass',\n        'shoulder_bag', 'shovel', 'shower_head', 'shower_cap',\n        'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink',\n        'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole',\n        'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)',\n        'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman',\n        'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball',\n        'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon',\n        
'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)',\n        'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish',\n        'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)',\n        'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish',\n        'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel',\n        'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer',\n        'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer',\n        'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign',\n        'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl',\n        'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses',\n        'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband',\n        'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword',\n        'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table',\n        'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight',\n        'tambourine', 'army_tank', 'tank_(storage_vessel)',\n        'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure',\n        'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup',\n        'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth',\n        'telephone_pole', 'telephoto_lens', 'television_camera',\n        'television_set', 'tennis_ball', 'tennis_racket', 'tequila',\n        'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread',\n        'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil',\n        'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven',\n        'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush',\n        'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel',\n        'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light',\n        'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline',\n        'tray', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle',\n        'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat',\n        'turban', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',\n        'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',\n        'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',\n        'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',\n        'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',\n        'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',\n        'washbasin', 'automatic_washer', 'watch', 'water_bottle',\n        'water_cooler', 'water_faucet', 'water_heater', 'water_jug',\n        'water_gun', 'water_scooter', 'water_ski', 'water_tower',\n        'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',\n        'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',\n        'whistle', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)',\n        'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket',\n        'wineglass', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon',\n        'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yogurt',\n        'yoke_(animal_equipment)', 'zebra', 'zucchini')\n\n    def load_annotations(self, ann_file):\n        try:\n            import lvis\n            if getattr(lvis, '__version__', '0') >= '10.5.3':\n                warnings.warn(\n                    
'mmlvis is deprecated, please install official lvis-api by \"pip install git+https://github.com/lvis-dataset/lvis-api.git\"',  # noqa: E501\n                    UserWarning)\n            from lvis import LVIS\n        except ImportError:\n            raise ImportError(\n                'Package lvis is not installed. Please run \"pip install git+https://github.com/lvis-dataset/lvis-api.git\".'  # noqa: E501\n            )\n        self.coco = LVIS(ann_file)\n        self.cat_ids = self.coco.get_cat_ids()\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            # coco_url is used in LVISv1 instead of file_name\n            # e.g. http://images.cocodataset.org/train2017/000000391895.jpg\n            # train/val split is specified in the url\n            info['filename'] = info['coco_url'].replace(\n                'http://images.cocodataset.org/', '')\n            data_infos.append(info)\n        return data_infos\n"
  },
  {
    "path": "mmdet/datasets/objects365.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nfrom .api_wrappers import COCO\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n# images exist in annotations but not in image folder.\nobjv2_ignore_list = [\n    osp.join('patch16', 'objects365_v2_00908726.jpg'),\n    osp.join('patch6', 'objects365_v1_00320532.jpg'),\n    osp.join('patch6', 'objects365_v1_00320534.jpg'),\n]\n\n\n@DATASETS.register_module()\nclass Objects365V1Dataset(CocoDataset):\n    \"\"\"Objects365 v1 dataset for detection.\"\"\"\n    CLASSES = (\n        'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle',\n        'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk',\n        'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes',\n        'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv',\n        'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl',\n        'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can',\n        'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket',\n        'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv',\n        'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone',\n        'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle',\n        'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish',\n        'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle',\n        'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle',\n        'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus',\n        'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone',\n        'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon',\n        'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal',\n        'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane',\n        'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote',\n        'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato',\n        'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone',\n        'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine',\n        'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove',\n        'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat',\n        'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun',\n        'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck',\n        'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator',\n        'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies',\n        'cutting/chopping board', 'tennis racket', 'candy',\n        'skating and skiing shoes', 'scissors', 'folder', 'baseball',\n        'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine',\n        'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear',\n        'american football', 'basketball', 'potato', 'paint brush', 'printer',\n        'billiards', 'fire hydrant', 'goose', 'projector', 'sausage',\n        'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball',\n        'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee',\n        'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender',\n        'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango',\n        'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion',\n        'ice cream', 'tape', 'wheelchair', 'plum', 'bar 
soap', 'scale',\n        'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple',\n        'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle',\n        'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar',\n        'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD',\n        'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado',\n        'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear',\n        'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn',\n        'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball',\n        'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice',\n        'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',\n        'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope',\n        'shrimp', 'rickshaw', 'trombone', 'pomegranate', 'coconut',\n        'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly',\n        'egg tart', 'cheese', 'pig', 'pomelo', 'race car', 'rice cooker',\n        'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion',\n        'chips', 'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill',\n        'spring rolls', 'tortoise/turtle', 'parrot', 'flute', 'measuring cup',\n        'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish',\n        'noodles', 'yak', 'mop', 'crab', 'microscope', 'barbell', 'bread/bun',\n        'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal',\n        'mangosteen', 'comb', 'eraser', 'pitaya', 'scallop', 'pencil case',\n        'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle', 'monkey',\n        'durian', 'game board', 'rabbit', 'french horn', 'ambulance',\n        'asparagus', 'hoverboard', 'pasta', 'target', 'hotair balloon',\n        'chainsaw', 'lobster', 'iron', 'flashlight')\n\n    PALETTE = None\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from COCO style annotation file.\n\n        Args:\n            ann_file (str): Path of annotation file.\n\n        Returns:\n            list[dict]: Annotation info from COCO api.\n        \"\"\"\n\n        self.coco = COCO(ann_file)\n        # 'categories' list in objects365_train.json and objects365_val.\n        # json is inconsistent, need sorted list(or dict) before get cat_ids.\n        cats = self.coco.cats\n        sorted_cats = {i: cats[i] for i in sorted(cats)}\n        self.coco.cats = sorted_cats\n        categories = self.coco.dataset['categories']\n        sorted_categories = sorted(categories, key=lambda i: i['id'])\n        self.coco.dataset['categories'] = sorted_categories\n        # The order of returned `cat_ids` will not\n        # change with the order of the CLASSES\n        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        total_ann_ids = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            info['filename'] = info['file_name']\n            data_infos.append(info)\n            ann_ids = self.coco.get_ann_ids(img_ids=[i])\n            total_ann_ids.extend(ann_ids)\n        assert len(set(total_ann_ids)) == len(\n            total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n        return data_infos\n\n\n@DATASETS.register_module()\nclass Objects365V2Dataset(CocoDataset):\n    \"\"\"Objects365 v2 
dataset for detection.\"\"\"\n\n    CLASSES = (\n        'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp',\n        'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf',\n        'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet',\n        'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower',\n        'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots',\n        'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt',\n        'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker',\n        'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool',\n        'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum',\n        'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar',\n        'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck',\n        'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy',\n        'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent',\n        'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner',\n        'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork',\n        'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon',\n        'Clock', 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger',\n        'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine',\n        'Toiletry', 'Keyboard', 'Tomato', 'Lantern',\n        'Machinery Vehicle', 'Fan', 'Green Vegetables', 'Banana',\n        'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer',\n        'Skiboard', 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley',\n        'Head Phone', 'Sports Car', 'Stop Sign', 'Dessert', 'Scooter',\n        'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck',\n        'Baseball Bat', 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli',\n        'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', 'Gun',\n        'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot',\n        'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper',\n        'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks',\n        'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board',\n        'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder',\n        'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball',\n        'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin',\n        'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards',\n        'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase',\n        'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck',\n        'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket',\n        'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis',\n        'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion',\n        'Green beans', 'Projector', 'Frisbee',\n        'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon',\n        'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon',\n        'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog',\n        'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer',\n        'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple',\n        'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',\n        'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',\n        
'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',\n        'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom',\n        'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',\n        'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese',\n        'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue',\n        'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap',\n        'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',\n        'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak',\n        'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate',\n        'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba',\n        'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', 'Buttefly',\n        'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill',\n        'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill', 'Lighter',\n        'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target',\n        'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak',\n        'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop',\n        'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',\n        'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster',\n        'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling',\n        'Table Tennis ')\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from COCO style annotation file.\n\n        Args:\n            ann_file (str): Path of annotation file.\n\n        Returns:\n            list[dict]: Annotation info from COCO api.\n        \"\"\"\n\n        self.coco = COCO(ann_file)\n        # The order of returned `cat_ids` will not\n        # change with the order of the CLASSES\n        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)\n\n        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}\n        self.img_ids = self.coco.get_img_ids()\n        data_infos = []\n        total_ann_ids = []\n        for i in self.img_ids:\n            info = self.coco.load_imgs([i])[0]\n            file_name = osp.join(\n                osp.split(osp.split(info['file_name'])[0])[-1],\n                osp.split(info['file_name'])[-1])\n            info['file_name'] = file_name\n            if info['file_name'] in objv2_ignore_list:\n                continue\n            info['filename'] = info['file_name']\n            data_infos.append(info)\n            ann_ids = self.coco.get_ann_ids(img_ids=[i])\n            total_ann_ids.extend(ann_ids)\n        assert len(set(total_ann_ids)) == len(\n            total_ann_ids), f\"Annotation ids in '{ann_file}' are not unique!\"\n        return data_infos\n"
  },
  {
    "path": "mmdet/datasets/openimages.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport csv\nimport json\nimport os.path as osp\nimport warnings\nfrom collections import OrderedDict, defaultdict\n\nimport mmcv\nimport numpy as np\nimport torch.distributed as dist\nfrom mmcv.runner import get_dist_info\nfrom mmcv.utils import print_log\n\nfrom mmdet.core import eval_map\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\n@DATASETS.register_module()\nclass OpenImagesDataset(CustomDataset):\n    \"\"\"Open Images dataset for detection.\n\n    Args:\n        ann_file (str): Annotation file path.\n        label_file (str): File path of the label description file that\n            maps the classes names in MID format to their short\n            descriptions.\n        image_level_ann_file (str): Image level annotation, which is used\n            in evaluation.\n        get_supercategory (bool): Whether to get parent class of the\n            current class. Default: True.\n        hierarchy_file (str): The file path of the class hierarchy.\n            Default: None.\n        get_metas (bool): Whether to get image metas in testing or\n            validation time. This should be `True` during evaluation.\n            Default: True. The OpenImages annotations do not have image\n            metas (width and height of the image), which will be used\n            during evaluation. We provide two ways to get image metas\n            in `OpenImagesDataset`:\n\n            - 1. `load from file`: Load image metas from pkl file, which\n              is suggested to use. We provided a script to get image metas:\n              `tools/misc/get_image_metas.py`, which need to run\n              this script before training/testing. Please refer to\n              `config/openimages/README.md` for more details.\n\n            - 2. `load from pipeline`, which will get image metas during\n              test time. However, this may reduce the inference speed,\n              especially when using distribution.\n\n        load_from_file (bool): Whether to get image metas from pkl file.\n        meta_file (str): File path to get image metas.\n        filter_labels (bool): Whether filter unannotated classes.\n            Default: True.\n        load_image_level_labels (bool): Whether load and consider image\n            level labels during evaluation. 
Default: True.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmcv.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 ann_file,\n                 label_file='',\n                 image_level_ann_file='',\n                 get_supercategory=True,\n                 hierarchy_file=None,\n                 get_metas=True,\n                 load_from_file=True,\n                 meta_file='',\n                 filter_labels=True,\n                 load_image_level_labels=True,\n                 file_client_args=dict(backend='disk'),\n                 **kwargs):\n        # may get error if use other file_client\n        self.file_client_args = file_client_args\n\n        self.cat2label = defaultdict(str)\n        self.index_dict = {}\n\n        # Although it will init file_client in `CustomDataset`,\n        # it needs to be init here.\n        file_client = mmcv.FileClient(**file_client_args)\n        # need get `index_dict` before load annotations\n        assert label_file.endswith('csv')\n        if hasattr(file_client, 'get_local_path'):\n            with file_client.get_local_path(label_file) as local_path:\n                class_names = self.get_classes_from_csv(local_path)\n        else:\n            class_names = self.get_classes_from_csv(label_file)\n        super(OpenImagesDataset, self).__init__(\n            ann_file=ann_file, file_client_args=file_client_args, **kwargs)\n        self.CLASSES = class_names\n        self.image_level_ann_file = image_level_ann_file\n        self.load_image_level_labels = load_image_level_labels\n        if get_supercategory is True:\n            assert hierarchy_file is not None\n            if self.__class__.__name__ == 'OpenImagesDataset':\n                assert hierarchy_file.endswith('json')\n            elif self.__class__.__name__ == 'OpenImagesChallengeDataset':\n                assert hierarchy_file.endswith('np')\n            else:\n                raise NotImplementedError\n            if hasattr(self.file_client, 'get_local_path'):\n                with self.file_client.get_local_path(\n                        hierarchy_file) as local_path:\n                    self.class_label_tree = self.get_relation_matrix(\n                        local_path)\n            else:\n                self.class_label_tree = self.get_relation_matrix(\n                    hierarchy_file)\n        self.get_supercategory = get_supercategory\n        self.get_metas = get_metas\n        self.load_from_file = load_from_file\n        self.meta_file = meta_file\n        if self.data_root is not None:\n            if not osp.isabs(self.meta_file):\n                self.meta_file = osp.join(self.data_root, self.meta_file)\n        self.filter_labels = filter_labels\n        self.rank, self.world_size = get_dist_info()\n        self.temp_img_metas = []\n        self.test_img_metas = []\n        self.test_img_shapes = []\n        self.load_from_pipeline = False if load_from_file else True\n\n    def get_classes_from_csv(self, label_file):\n        \"\"\"Get classes name from file.\n\n        Args:\n            label_file (str): File path of the label description file that\n                maps the classes names in MID format to their short\n                descriptions.\n\n        Returns:\n            list[str]: Class name of OpenImages.\n        \"\"\"\n\n        index_list = []\n        classes_names = []\n        with 
open(label_file, 'r') as f:\n            reader = csv.reader(f)\n            for line in reader:\n                self.cat2label[line[0]] = line[1]\n                classes_names.append(line[1])\n                index_list.append(line[0])\n        self.index_dict = {index: i for i, index in enumerate(index_list)}\n        return classes_names\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from annotation file.\n\n        Special described `self.data_infos` (defaultdict[list[dict]])\n        in this function: Annotations where item of the defaultdict\n        indicates an image, each of which has (n) dicts. Keys of dicts are:\n\n            - `bbox` (list): coordinates of the box, in normalized image\n              coordinates, of shape 4.\n            - `label` (int): the label id.\n            - `is_group_of` (bool):  Indicates that the box spans a group\n              of objects (e.g., a bed of flowers or a crowd of people).\n            - `is_occluded` (bool): Indicates that the object is occluded\n              by another object in the image.\n            - `is_truncated` (bool): Indicates that the object extends\n              beyond the boundary of the image.\n            - `is_depiction` (bool): Indicates that the object is a\n              depiction.\n            - `is_inside` (bool): Indicates a picture taken from the\n              inside of the object.\n\n        Args:\n            ann_file (str): CSV style annotation file path.\n\n        Returns:\n            list[dict]:  Data infos where each item of the list\n            indicates an image. Keys of annotations are:\n\n                - `img_id` (str): Image name.\n                - `filename` (str): Image name with suffix.\n        \"\"\"\n        self.ann_infos = defaultdict(list)\n        data_infos = []\n        cp_filename = None\n        with open(ann_file, 'r') as f:\n            reader = csv.reader(f)\n            for i, line in enumerate(reader):\n                if i == 0:\n                    continue\n                img_id = line[0]\n                filename = f'{img_id}.jpg'\n                label_id = line[2]\n                assert label_id in self.index_dict\n                label = int(self.index_dict[label_id])\n                bbox = [\n                    float(line[4]),  # xmin\n                    float(line[6]),  # ymin\n                    float(line[5]),  # xmax\n                    float(line[7])  # ymax\n                ]\n                is_occluded = True if int(line[8]) == 1 else False\n                is_truncated = True if int(line[9]) == 1 else False\n                is_group_of = True if int(line[10]) == 1 else False\n                is_depiction = True if int(line[11]) == 1 else False\n                is_inside = True if int(line[12]) == 1 else False\n\n                self.ann_infos[img_id].append(\n                    dict(\n                        bbox=bbox,\n                        label=label,\n                        is_occluded=is_occluded,\n                        is_truncated=is_truncated,\n                        is_group_of=is_group_of,\n                        is_depiction=is_depiction,\n                        is_inside=is_inside))\n                if filename != cp_filename:\n                    data_infos.append(dict(img_id=img_id, filename=filename))\n                    cp_filename = filename\n        return data_infos\n\n    def get_ann_info(self, idx):\n        \"\"\"Get OpenImages annotation by index.\n\n        Args:\n            idx (int): 
Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n        img_id = self.data_infos[idx]['img_id']\n        bboxes = []\n        labels = []\n        bboxes_ignore = []\n        labels_ignore = []\n        is_occludeds = []\n        is_truncateds = []\n        is_group_ofs = []\n        is_depictions = []\n        is_insides = []\n        for obj in self.ann_infos[img_id]:\n            label = int(obj['label'])\n            bbox = [\n                float(obj['bbox'][0]),\n                float(obj['bbox'][1]),\n                float(obj['bbox'][2]),\n                float(obj['bbox'][3])\n            ]\n            bboxes.append(bbox)\n            labels.append(label)\n\n            # Other parameters\n            is_occludeds.append(obj['is_occluded'])\n            is_truncateds.append(obj['is_truncated'])\n            is_group_ofs.append(obj['is_group_of'])\n            is_depictions.append(obj['is_depiction'])\n            is_insides.append(obj['is_inside'])\n        if not bboxes:\n            bboxes = np.zeros((0, 4))\n            labels = np.zeros((0, ))\n        else:\n            bboxes = np.array(bboxes)\n            labels = np.array(labels)\n        if not bboxes_ignore:\n            bboxes_ignore = np.zeros((0, 4))\n            labels_ignore = np.zeros((0, ))\n        else:\n            bboxes_ignore = np.array(bboxes_ignore)\n            labels_ignore = np.array(labels_ignore)\n\n        assert len(is_group_ofs) == len(labels) == len(bboxes)\n        gt_is_group_ofs = np.array(is_group_ofs, dtype=bool)\n\n        # These parameters is not used yet.\n        is_occludeds = np.array(is_occludeds, dtype=bool)\n        is_truncateds = np.array(is_truncateds, dtype=bool)\n        is_depictions = np.array(is_depictions, dtype=bool)\n        is_insides = np.array(is_insides, dtype=bool)\n\n        ann = dict(\n            bboxes=bboxes.astype(np.float32),\n            labels=labels.astype(np.int64),\n            bboxes_ignore=bboxes_ignore.astype(np.float32),\n            labels_ignore=labels_ignore.astype(np.int64),\n            gt_is_group_ofs=gt_is_group_ofs,\n            is_occludeds=is_occludeds,\n            is_truncateds=is_truncateds,\n            is_depictions=is_depictions,\n            is_insides=is_insides)\n\n        return ann\n\n    def get_meta_from_file(self, meta_file=''):\n        \"\"\"Get image metas from pkl file.\"\"\"\n        metas = mmcv.load(\n            meta_file,\n            file_format='pkl',\n            file_client_args=self.file_client_args)\n        assert len(metas) == len(self)\n        for i in range(len(metas)):\n            file_name = osp.split(metas[i]['filename'])[-1]\n            img_info = self.data_infos[i].get('img_info', None)\n            if img_info is not None:\n                assert file_name == osp.split(img_info['filename'])[-1]\n            else:\n                assert file_name == self.data_infos[i]['filename']\n            hw = metas[i]['ori_shape'][:2]\n            self.test_img_shapes.append(hw)\n\n    def get_meta_from_pipeline(self, results):\n        \"\"\"Get image metas from pipeline.\"\"\"\n        self.temp_img_metas.extend(results['img_metas'])\n        if dist.is_available() and self.world_size > 1:\n            from mmdet.apis.test import collect_results_cpu\n\n            self.test_img_metas = collect_results_cpu(self.temp_img_metas,\n                                                      len(self))\n        else:\n            self.test_img_metas = 
self.temp_img_metas\n\n    def get_img_shape(self, metas):\n        \"\"\"Set images original shape into data_infos.\"\"\"\n        assert len(metas) == len(self)\n        for i in range(len(metas)):\n            file_name = osp.split(metas[i].data['ori_filename'])[-1]\n            img_info = self.data_infos[i].get('img_info', None)\n            if img_info is not None:\n                assert file_name == osp.split(img_info['filename'])[-1]\n            else:\n                assert file_name == self.data_infos[i]['filename']\n            hw = metas[i].data['ori_shape'][:2]\n            self.test_img_shapes.append(hw)\n\n    def prepare_test_img(self, idx):\n        \"\"\"Get testing data after pipeline.\"\"\"\n        img_info = self.data_infos[idx]\n        results = dict(img_info=img_info)\n        if self.proposals is not None:\n            results['proposals'] = self.proposals[idx]\n        self.pre_pipeline(results)\n        results = self.pipeline(results)\n        if self.get_metas and self.load_from_pipeline:\n            self.get_meta_from_pipeline(results)\n        return results\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small.\"\"\"\n        if self.filter_empty_gt:\n            warnings.warn('OpenImageDatasets does not support '\n                          'filtering empty gt images.')\n        valid_inds = [i for i in range(len(self))]\n        return valid_inds\n\n    def _set_group_flag(self):\n        \"\"\"Set flag according to image aspect ratio.\"\"\"\n        self.flag = np.zeros(len(self), dtype=np.uint8)\n        # TODO: set flag without width and height\n\n    def get_relation_matrix(self, hierarchy_file):\n        \"\"\"Get hierarchy for classes.\n\n        Args:\n            hierarchy_file (sty): File path to the hierarchy for classes.\n\n        Returns:\n            ndarray: The matrix of the corresponding relationship between\n            the parent class and the child class, of shape\n            (class_num, class_num).\n        \"\"\"\n\n        if self.data_root is not None:\n            if not osp.isabs(hierarchy_file):\n                hierarchy_file = osp.join(self.data_root, hierarchy_file)\n        with open(hierarchy_file, 'r') as f:\n            hierarchy = json.load(f)\n        class_num = len(self.CLASSES)\n        class_label_tree = np.eye(class_num, class_num)\n        class_label_tree = self._convert_hierarchy_tree(\n            hierarchy, class_label_tree)\n        return class_label_tree\n\n    def _convert_hierarchy_tree(self,\n                                hierarchy_map,\n                                class_label_tree,\n                                parents=[],\n                                get_all_parents=True):\n        \"\"\"Get matrix of the corresponding relationship between the parent\n        class and the child class.\n\n        Args:\n            hierarchy_map (dict): Including label name and corresponding\n                subcategory. 
Keys of dicts are:\n\n                - `LabeName` (str): Name of the label.\n                - `Subcategory` (dict | list): Corresponding subcategory(ies).\n            class_label_tree (ndarray): The matrix of the corresponding\n                relationship between the parent class and the child class,\n                of shape (class_num, class_num).\n            parents (list): Corresponding parent class.\n            get_all_parents (bool): Whether get all parent names.\n                Default: True\n\n        Returns:\n            ndarray: The matrix of the corresponding relationship between\n            the parent class and the child class, of shape\n            (class_num, class_num).\n        \"\"\"\n\n        if 'Subcategory' in hierarchy_map:\n            for node in hierarchy_map['Subcategory']:\n                if 'LabelName' in node:\n                    children_name = node['LabelName']\n                    children_index = self.index_dict[children_name]\n                    children = [children_index]\n                else:\n                    continue\n                if len(parents) > 0:\n                    for parent_index in parents:\n                        if get_all_parents:\n                            children.append(parent_index)\n                        class_label_tree[children_index, parent_index] = 1\n\n                class_label_tree = self._convert_hierarchy_tree(\n                    node, class_label_tree, parents=children)\n\n        return class_label_tree\n\n    def add_supercategory_ann(self, annotations):\n        \"\"\"Add parent classes of the corresponding class of the ground truth\n        bboxes.\"\"\"\n        for i, ann in enumerate(annotations):\n            assert len(ann['labels']) == len(ann['bboxes']) == \\\n                   len(ann['gt_is_group_ofs'])\n            gt_bboxes = []\n            gt_is_group_ofs = []\n            gt_labels = []\n            for j in range(len(ann['labels'])):\n                label = ann['labels'][j]\n                bbox = ann['bboxes'][j]\n                is_group = ann['gt_is_group_ofs'][j]\n                label = np.where(self.class_label_tree[label])[0]\n                if len(label) > 1:\n                    for k in range(len(label)):\n                        gt_bboxes.append(bbox)\n                        gt_is_group_ofs.append(is_group)\n                        gt_labels.append(label[k])\n                else:\n                    gt_bboxes.append(bbox)\n                    gt_is_group_ofs.append(is_group)\n                    gt_labels.append(label[0])\n            annotations[i] = dict(\n                bboxes=np.array(gt_bboxes).astype(np.float32),\n                labels=np.array(gt_labels).astype(np.int64),\n                bboxes_ignore=ann['bboxes_ignore'],\n                gt_is_group_ofs=np.array(gt_is_group_ofs).astype(bool))\n\n        return annotations\n\n    def process_results(self, det_results, annotations,\n                        image_level_annotations):\n        \"\"\"Process results of the corresponding class of the detection bboxes.\n\n        Note: It will choose to do the following two processing according to\n        the parameters:\n\n        1. Whether to add parent classes of the corresponding class of the\n        detection bboxes.\n\n        2. 
Whether to ignore the classes that are not annotated on that image.\n        \"\"\"\n        if image_level_annotations is not None:\n            assert len(annotations) == \\\n                   len(image_level_annotations) == \\\n                   len(det_results)\n        else:\n            assert len(annotations) == len(det_results)\n        for i in range(len(det_results)):\n            results = copy.deepcopy(det_results[i])\n            valid_classes = np.where(\n                np.array([[bbox.shape[0]] for bbox in det_results[i]]) != 0)[0]\n            if image_level_annotations is not None:\n                labels = annotations[i]['labels']\n                image_level_labels = \\\n                    image_level_annotations[i]['image_level_labels']\n                allowed_labels = np.unique(\n                    np.append(labels, image_level_labels))\n            else:\n                allowed_labels = np.unique(annotations[i]['labels'])\n\n            for valid_class in valid_classes:\n                det_cls = np.where(self.class_label_tree[valid_class])[0]\n                for index in det_cls:\n                    if index in allowed_labels and \\\n                            index != valid_class and \\\n                            self.get_supercategory:\n                        det_results[i][index] = \\\n                            np.concatenate((det_results[i][index],\n                                            results[valid_class]))\n                    elif index not in allowed_labels and self.filter_labels:\n                        # Remove useless parts\n                        det_results[i][index] = np.empty(\n                            (0, 5)).astype(np.float32)\n        return det_results\n\n    def load_image_label_from_csv(self, image_level_ann_file):\n        \"\"\"Load image level annotations from csv style ann_file.\n\n        Args:\n            image_level_ann_file (str): CSV style image level annotation\n                file path.\n\n        Returns:\n            defaultdict[list[dict]]: Annotations where item of the defaultdict\n            indicates an image, each of which has (n) dicts.\n            Keys of dicts are:\n\n                - `image_level_label` (int): Label id.\n                - `confidence` (float): Labels that are human-verified to be\n                  present in an image have confidence = 1 (positive labels).\n                  Labels that are human-verified to be absent from an image\n                  have confidence = 0 (negative labels). 
Machine-generated\n                  labels have fractional confidences, generally >= 0.5.\n                  The higher the confidence, the smaller the chance for\n                  the label to be a false positive.\n        \"\"\"\n\n        item_lists = defaultdict(list)\n        with open(image_level_ann_file, 'r') as f:\n            reader = csv.reader(f)\n            for i, line in enumerate(reader):\n                if i == 0:\n                    continue\n                img_id = line[0]\n                item_lists[img_id].append(\n                    dict(\n                        image_level_label=int(self.index_dict[line[2]]),\n                        confidence=float(line[3])))\n        return item_lists\n\n    def get_image_level_ann(self, image_level_ann_file):\n        \"\"\"Get OpenImages annotation by index.\n\n        Args:\n            image_level_ann_file (str): CSV style image level annotation\n                file path.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        if hasattr(self.file_client, 'get_local_path'):\n            with self.file_client.get_local_path(image_level_ann_file) \\\n                    as local_path:\n                item_lists = self.load_image_label_from_csv(local_path)\n        else:\n            item_lists = self.load_image_label_from_csv(image_level_ann_file)\n        image_level_annotations = []\n        for i in range(len(self)):\n            img_info = self.data_infos[i].get('img_info', None)\n            if img_info is not None:\n                # for Open Images Challenges\n                img_id = osp.split(img_info['filename'])[-1][:-4]\n            else:\n                # for Open Images v6\n                img_id = self.data_infos[i]['img_id']\n            item_list = item_lists.get(img_id, None)\n            if item_list is not None:\n                image_level_labels = []\n                confidences = []\n                for obj in item_list:\n                    image_level_label = int(obj['image_level_label'])\n                    confidence = float(obj['confidence'])\n\n                    image_level_labels.append(image_level_label)\n                    confidences.append(confidence)\n\n                if not image_level_labels:\n                    image_level_labels = np.zeros((0, ))\n                    confidences = np.zeros((0, ))\n                else:\n                    image_level_labels = np.array(image_level_labels)\n                    confidences = np.array(confidences)\n            else:\n                image_level_labels = np.zeros((0, ))\n                confidences = np.zeros((0, ))\n            ann = dict(\n                image_level_labels=image_level_labels.astype(np.int64),\n                confidences=confidences.astype(np.float32))\n            image_level_annotations.append(ann)\n\n        return image_level_annotations\n\n    def denormalize_gt_bboxes(self, annotations):\n        \"\"\"Convert ground truth bboxes from relative position to absolute\n        position.\n\n        Only used in evaluating time.\n        \"\"\"\n        assert len(self.test_img_shapes) == len(annotations)\n        for i in range(len(annotations)):\n            h, w = self.test_img_shapes[i]\n            annotations[i]['bboxes'][:, 0::2] *= w\n            annotations[i]['bboxes'][:, 1::2] *= h\n        return annotations\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get category ids by index.\n\n        Args:\n            idx (int): Index of data.\n\n        
Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n        return self.get_ann_info(idx)['labels'].astype(np.int64).tolist()\n\n    def evaluate(self,\n                 results,\n                 metric='mAP',\n                 logger=None,\n                 iou_thr=0.5,\n                 ioa_thr=0.5,\n                 scale_ranges=None,\n                 denorm_gt_bbox=True,\n                 use_group_of=True):\n        \"\"\"Evaluate in OpenImages.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. Option is\n                 'mAP'. Default: 'mAP'.\n            logger (logging.Logger | str, optional): Logger used for printing\n                related information during evaluation. Default: None.\n            iou_thr (float | list[float]): IoU threshold. Default: 0.5.\n            ioa_thr (float | list[float]): IoA threshold. Default: 0.5.\n            scale_ranges (list[tuple], optional): Scale ranges for evaluating\n                mAP. If not specified, all bounding boxes would be included in\n                evaluation. Default: None\n            denorm_gt_bbox (bool): Whether to denorm ground truth bboxes from\n                relative position to absolute position. Default: True\n            use_group_of (bool): Whether to consider group of ground truth\n                bboxes during evaluation. Default: True.\n\n        Returns:\n            dict[str, float]: AP metrics.\n        \"\"\"\n\n        if not isinstance(metric, str):\n            assert len(metric) == 1\n            metric = metric[0]\n        allowed_metrics = ['mAP']\n        if metric not in allowed_metrics:\n            raise KeyError(f'metric {metric} is not supported')\n        annotations = [self.get_ann_info(i) for i in range(len(self))]\n\n        if self.load_image_level_labels:\n            image_level_annotations = \\\n                self.get_image_level_ann(self.image_level_ann_file)\n        else:\n            image_level_annotations = None\n\n        # load metas from file\n        if self.get_metas and self.load_from_file:\n            assert self.meta_file.endswith(\n                'pkl'), 'File name must be pkl suffix'\n            self.get_meta_from_file(self.meta_file)\n        # load metas from pipeline\n        else:\n            self.get_img_shape(self.test_img_metas)\n\n        if len(self.test_img_shapes) > len(self):\n            self.test_img_shapes = self.test_img_shapes[:len(self)]\n\n        if denorm_gt_bbox:\n            annotations = self.denormalize_gt_bboxes(annotations)\n\n        # Reset test_image_metas, temp_image_metas and test_img_shapes\n        # to avoid potential error\n        self.temp_img_metas = []\n        self.test_img_shapes = []\n        self.test_img_metas = []\n        if self.get_supercategory:\n            annotations = self.add_supercategory_ann(annotations)\n\n        results = self.process_results(results, annotations,\n                                       image_level_annotations)\n        if use_group_of:\n            assert ioa_thr is not None, \\\n                'ioa_thr must have value when using group_of in evaluation.'\n\n        eval_results = OrderedDict()\n        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr\n        ioa_thrs = [ioa_thr] if isinstance(ioa_thr, float) or ioa_thr is None \\\n            else ioa_thr\n\n        # get dataset type\n        if len(self.CLASSES) == 500:\n   
         ds_name = 'oid_challenge'\n        elif len(self.CLASSES) == 601:\n            ds_name = 'oid_v6'\n        else:\n            ds_name = self.CLASSES\n            warnings.warn('Cannot infer dataset type from the length of the '\n                          'classes. Set `oid_v6` as dataset type.')\n\n        if metric == 'mAP':\n            assert isinstance(iou_thrs, list) and isinstance(ioa_thrs, list)\n            assert len(ioa_thrs) == len(iou_thrs)\n            mean_aps = []\n            for iou_thr, ioa_thr in zip(iou_thrs, ioa_thrs):\n                print_log(f'\\n{\"-\" * 15}iou_thr, ioa_thr: {iou_thr}, {ioa_thr}'\n                          f'{\"-\" * 15}')\n                mean_ap, _ = eval_map(\n                    results,\n                    annotations,\n                    scale_ranges=scale_ranges,\n                    iou_thr=iou_thr,\n                    ioa_thr=ioa_thr,\n                    dataset=ds_name,\n                    logger=logger,\n                    use_group_of=use_group_of)\n                mean_aps.append(mean_ap)\n                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)\n            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)\n        return eval_results\n\n\n@DATASETS.register_module()\nclass OpenImagesChallengeDataset(OpenImagesDataset):\n    \"\"\"Open Images Challenge dataset for detection.\"\"\"\n\n    def __init__(self, ann_file, **kwargs):\n        assert ann_file.endswith('txt')\n        super(OpenImagesChallengeDataset, self).__init__(\n            ann_file=ann_file, **kwargs)\n\n    def get_classes_from_csv(self, label_file):\n        \"\"\"Get classes name from file.\n\n        Args:\n            label_file (str): File path of the label description file that\n                maps the classes names in MID format to their short\n                descriptions.\n\n        Returns:\n            list: Class name of OpenImages.\n        \"\"\"\n\n        label_list = []\n        id_list = []\n        with open(label_file, 'r') as f:\n            reader = csv.reader(f)\n            for line in reader:\n                label_name = line[0]\n                label_id = int(line[2])\n\n                label_list.append(line[1])\n                id_list.append(label_id)\n                self.index_dict[label_name] = label_id - 1\n\n        indexes = np.argsort(id_list)\n        classes_names = []\n        for index in indexes:\n            classes_names.append(label_list[index])\n        return classes_names\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from annotation file.\"\"\"\n        with open(ann_file) as f:\n            lines = f.readlines()\n        i = 0\n        ann_infos = []\n        while i < len(lines):\n            bboxes = []\n            labels = []\n            is_group_ofs = []\n            filename = lines[i].rstrip()\n            i += 2\n            img_gt_size = int(lines[i])\n            i += 1\n            for j in range(img_gt_size):\n                sp = lines[i + j].split()\n                bboxes.append(\n                    [float(sp[1]),\n                     float(sp[2]),\n                     float(sp[3]),\n                     float(sp[4])])\n                labels.append(int(sp[0]) - 1)  # labels begin from 1\n                is_group_ofs.append(True if int(sp[5]) == 1 else False)\n            i += img_gt_size\n\n            gt_bboxes = np.array(bboxes, dtype=np.float32)\n            gt_labels = np.array(labels, dtype=np.int64)\n            
gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n            gt_is_group_ofs = np.array(is_group_ofs, dtype=bool)\n\n            img_info = dict(filename=filename)\n            ann_info = dict(\n                bboxes=gt_bboxes,\n                labels=gt_labels,\n                bboxes_ignore=gt_bboxes_ignore,\n                gt_is_group_ofs=gt_is_group_ofs)\n            ann_infos.append(dict(img_info=img_info, ann_info=ann_info))\n\n        return ann_infos\n\n    def prepare_train_img(self, idx):\n        \"\"\"Get training data and annotations after pipeline.\"\"\"\n        ann_info = self.data_infos[idx]\n        results = dict(\n            img_info=ann_info['img_info'],\n            ann_info=ann_info['ann_info'],\n        )\n        if self.proposals is not None:\n            results['proposals'] = self.proposals[idx]\n        self.pre_pipeline(results)\n        return self.pipeline(results)\n\n    def prepare_test_img(self, idx):\n        \"\"\"Get testing data after pipeline.\"\"\"\n        ann_info = self.data_infos[idx]\n        results = dict(img_info=ann_info['img_info'])\n        if self.proposals is not None:\n            results['proposals'] = self.proposals[idx]\n        self.pre_pipeline(results)\n\n        results = self.pipeline(results)\n        if self.get_metas and self.load_from_pipeline:\n            self.get_meta_from_pipeline(results)\n        return results\n\n    def get_relation_matrix(self, hierarchy_file):\n        \"\"\"Get hierarchy for classes.\n\n        Args:\n            hierarchy_file (str): File path to the hierarchy for classes.\n\n        Returns:\n            ndarray: The matrix of the corresponding\n            relationship between the parent class and the child class,\n            of shape (class_num, class_num).\n        \"\"\"\n        class_label_tree = np.load(hierarchy_file, allow_pickle=True)\n        return class_label_tree[1:, 1:]\n\n    def get_ann_info(self, idx):\n        \"\"\"Get OpenImages annotation by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n        # avoid some potential error\n        data_infos = copy.deepcopy(self.data_infos[idx]['ann_info'])\n        return data_infos\n\n    def load_image_label_from_csv(self, image_level_ann_file):\n        \"\"\"Load image level annotations from csv style ann_file.\n\n        Args:\n            image_level_ann_file (str): CSV style image level annotation\n                file path.\n\n        Returns:\n            defaultdict[list[dict]]: Annotations where item of the defaultdict\n            indicates an image, each of which has (n) dicts.\n            Keys of dicts are:\n\n                - `image_level_label` (int): of shape 1.\n                - `confidence` (float): of shape 1.\n        \"\"\"\n\n        item_lists = defaultdict(list)\n        with open(image_level_ann_file, 'r') as f:\n            reader = csv.reader(f)\n            i = -1\n            for line in reader:\n                i += 1\n                if i == 0:\n                    continue\n                else:\n                    img_id = line[0]\n                    label_id = line[1]\n                    assert label_id in self.index_dict\n                    image_level_label = int(self.index_dict[label_id])\n                    confidence = float(line[2])\n                    item_lists[img_id].append(\n                        dict(\n                            
image_level_label=image_level_label,\n                            confidence=confidence))\n        return item_lists\n"
  },
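  {
    "path": "examples/openimages_hierarchy_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the upstream\n# OpenMMLab repository, and the file name and the tiny hierarchy below are\n# made up for the example. It shows how the class_label_tree matrix built by\n# OpenImagesDataset.get_relation_matrix is consumed by add_supercategory_ann:\n# the row of a child class holds ones at the child itself and at every\n# ancestor, so one ground truth box can be duplicated for all parent classes.\nimport numpy as np\n\n# Hypothetical class indexing: 0 Animal, 1 Dog, 2 Cat, 3 Car.\nclass_num = 4\nclass_label_tree = np.eye(class_num)\nclass_label_tree[1, 0] = 1  # Dog is a subcategory of Animal\nclass_label_tree[2, 0] = 1  # Cat is a subcategory of Animal\n\nlabel = 1  # a ground truth box annotated as Dog\n# Same lookup as in add_supercategory_ann.\nexpanded = np.where(class_label_tree[label])[0]\nprint(expanded.tolist())  # [0, 1] -> the box is kept for Dog and added for Animal\n"
  },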
  {
    "path": "mmdet/datasets/pipelines/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .auto_augment import (AutoAugment, BrightnessTransform, ColorTransform,\n                           ContrastTransform, EqualizeTransform, Rotate, Shear,\n                           Translate)\nfrom .compose import Compose\nfrom .formatting import (Collect, DefaultFormatBundle, ImageToTensor,\n                         ToDataContainer, ToTensor, Transpose, to_tensor)\nfrom .instaboost import InstaBoost\nfrom .loading import (FilterAnnotations, LoadAnnotations, LoadImageFromFile,\n                      LoadImageFromWebcam, LoadMultiChannelImageFromFiles,\n                      LoadPanopticAnnotations, LoadProposals)\nfrom .test_time_aug import MultiScaleFlipAug\nfrom .transforms import (Albu, CopyPaste, CutOut, Expand, MinIoURandomCrop,\n                         MixUp, Mosaic, Normalize, Pad, PhotoMetricDistortion,\n                         RandomAffine, RandomCenterCropPad, RandomCrop,\n                         RandomFlip, RandomShift, Resize, SegRescale,\n                         YOLOXHSVRandomAug)\n\n__all__ = [\n    'Compose', 'to_tensor', 'ToTensor', 'ImageToTensor', 'ToDataContainer',\n    'Transpose', 'Collect', 'DefaultFormatBundle', 'LoadAnnotations',\n    'LoadImageFromFile', 'LoadImageFromWebcam', 'LoadPanopticAnnotations',\n    'LoadMultiChannelImageFromFiles', 'LoadProposals', 'FilterAnnotations',\n    'MultiScaleFlipAug', 'Resize', 'RandomFlip', 'Pad', 'RandomCrop',\n    'Normalize', 'SegRescale', 'MinIoURandomCrop', 'Expand',\n    'PhotoMetricDistortion', 'Albu', 'InstaBoost', 'RandomCenterCropPad',\n    'AutoAugment', 'CutOut', 'Shear', 'Rotate', 'ColorTransform',\n    'EqualizeTransform', 'BrightnessTransform', 'ContrastTransform',\n    'Translate', 'RandomShift', 'Mosaic', 'MixUp', 'RandomAffine',\n    'YOLOXHSVRandomAug', 'CopyPaste'\n]\n"
  },
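  {
    "path": "examples/auto_augment_level_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the upstream\n# OpenMMLab repository; the file name and the sample numbers are made up.\n# It re-declares the small level-to-magnitude helpers from\n# mmdet/datasets/pipelines/auto_augment.py so the mapping used by Shear,\n# Rotate, Translate and the colour transforms can be tried in isolation.\n_MAX_LEVEL = 10\n\n\ndef level_to_value(level, max_value):\n    # Linear map: level 0 -> 0, level _MAX_LEVEL -> max_value.\n    return (level / _MAX_LEVEL) * max_value\n\n\ndef enhance_level_to_value(level, a=1.8, b=0.1):\n    # Enhancement factor in [0.1, 1.9]; level 5 gives 1.0 (identity).\n    return (level / _MAX_LEVEL) * a + b\n\n\nif __name__ == '__main__':\n    # Shear with level=8 and max_shear_magnitude=0.3 -> magnitude 0.24.\n    print(level_to_value(8, 0.3))\n    # Rotate with level=10 and max_rotate_angle=30 -> 30 degrees.\n    print(level_to_value(10, 30))\n    # ColorTransform with level=5 -> factor 1.0 (image unchanged).\n    print(enhance_level_to_value(5))\n"
  },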
  {
    "path": "mmdet/datasets/pipelines/auto_augment.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport cv2\nimport mmcv\nimport numpy as np\n\nfrom ..builder import PIPELINES\nfrom .compose import Compose\n\n_MAX_LEVEL = 10\n\n\ndef level_to_value(level, max_value):\n    \"\"\"Map from level to values based on max_value.\"\"\"\n    return (level / _MAX_LEVEL) * max_value\n\n\ndef enhance_level_to_value(level, a=1.8, b=0.1):\n    \"\"\"Map from level to values.\"\"\"\n    return (level / _MAX_LEVEL) * a + b\n\n\ndef random_negative(value, random_negative_prob):\n    \"\"\"Randomly negate value based on random_negative_prob.\"\"\"\n    return -value if np.random.rand() < random_negative_prob else value\n\n\ndef bbox2fields():\n    \"\"\"The key correspondence from bboxes to labels, masks and\n    segmentations.\"\"\"\n    bbox2label = {\n        'gt_bboxes': 'gt_labels',\n        'gt_bboxes_ignore': 'gt_labels_ignore'\n    }\n    bbox2mask = {\n        'gt_bboxes': 'gt_masks',\n        'gt_bboxes_ignore': 'gt_masks_ignore'\n    }\n    bbox2seg = {\n        'gt_bboxes': 'gt_semantic_seg',\n    }\n    return bbox2label, bbox2mask, bbox2seg\n\n\n@PIPELINES.register_module()\nclass AutoAugment:\n    \"\"\"Auto augmentation.\n\n    This data augmentation is proposed in `Learning Data Augmentation\n    Strategies for Object Detection <https://arxiv.org/pdf/1906.11172>`_.\n\n    TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms\n\n    Args:\n        policies (list[list[dict]]): The policies of auto augmentation. Each\n            policy in ``policies`` is a specific augmentation policy, and is\n            composed by several augmentations (dict). When AutoAugment is\n            called, a random policy in ``policies`` will be selected to\n            augment images.\n\n    Examples:\n        >>> replace = (104, 116, 124)\n        >>> policies = [\n        >>>     [\n        >>>         dict(type='Sharpness', prob=0.0, level=8),\n        >>>         dict(\n        >>>             type='Shear',\n        >>>             prob=0.4,\n        >>>             level=0,\n        >>>             replace=replace,\n        >>>             axis='x')\n        >>>     ],\n        >>>     [\n        >>>         dict(\n        >>>             type='Rotate',\n        >>>             prob=0.6,\n        >>>             level=10,\n        >>>             replace=replace),\n        >>>         dict(type='Color', prob=1.0, level=6)\n        >>>     ]\n        >>> ]\n        >>> augmentation = AutoAugment(policies)\n        >>> img = np.ones((100, 100, 3))\n        >>> gt_bboxes = np.ones((10, 4))\n        >>> results = dict(img=img, gt_bboxes=gt_bboxes)\n        >>> results = augmentation(results)\n    \"\"\"\n\n    def __init__(self, policies):\n        assert isinstance(policies, list) and len(policies) > 0, \\\n            'Policies must be a non-empty list.'\n        for policy in policies:\n            assert isinstance(policy, list) and len(policy) > 0, \\\n                'Each policy in policies must be a non-empty list.'\n            for augment in policy:\n                assert isinstance(augment, dict) and 'type' in augment, \\\n                    'Each specific augmentation must be a dict with key' \\\n                    ' \"type\".'\n\n        self.policies = copy.deepcopy(policies)\n        self.transforms = [Compose(policy) for policy in self.policies]\n\n    def __call__(self, results):\n        transform = np.random.choice(self.transforms)\n        return transform(results)\n\n    def __repr__(self):\n        
return f'{self.__class__.__name__}(policies={self.policies})'\n\n\n@PIPELINES.register_module()\nclass Shear:\n    \"\"\"Apply Shear Transformation to image (and its corresponding bbox, mask,\n    segmentation).\n\n    Args:\n        level (int | float): The level should be in range [0,_MAX_LEVEL].\n        img_fill_val (int | float | tuple): The filled values for image border.\n            If float, the same fill value will be used for all the three\n            channels of image. If tuple, the should be 3 elements.\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Default 255.\n        prob (float): The probability for performing Shear and should be in\n            range [0, 1].\n        direction (str): The direction for shear, either \"horizontal\"\n            or \"vertical\".\n        max_shear_magnitude (float): The maximum magnitude for Shear\n            transformation.\n        random_negative_prob (float): The probability that turns the\n                offset negative. Should be in range [0,1]\n        interpolation (str): Same as in :func:`mmcv.imshear`.\n    \"\"\"\n\n    def __init__(self,\n                 level,\n                 img_fill_val=128,\n                 seg_ignore_label=255,\n                 prob=0.5,\n                 direction='horizontal',\n                 max_shear_magnitude=0.3,\n                 random_negative_prob=0.5,\n                 interpolation='bilinear'):\n        assert isinstance(level, (int, float)), 'The level must be type ' \\\n            f'int or float, got {type(level)}.'\n        assert 0 <= level <= _MAX_LEVEL, 'The level should be in range ' \\\n            f'[0,{_MAX_LEVEL}], got {level}.'\n        if isinstance(img_fill_val, (float, int)):\n            img_fill_val = tuple([float(img_fill_val)] * 3)\n        elif isinstance(img_fill_val, tuple):\n            assert len(img_fill_val) == 3, 'img_fill_val as tuple must ' \\\n                f'have 3 elements. got {len(img_fill_val)}.'\n            img_fill_val = tuple([float(val) for val in img_fill_val])\n        else:\n            raise ValueError(\n                'img_fill_val must be float or tuple with 3 elements.')\n        assert np.all([0 <= val <= 255 for val in img_fill_val]), 'all ' \\\n            'elements of img_fill_val should between range [0,255].' \\\n            f'got {img_fill_val}.'\n        assert 0 <= prob <= 1.0, 'The probability of shear should be in ' \\\n            f'range [0,1]. got {prob}.'\n        assert direction in ('horizontal', 'vertical'), 'direction must ' \\\n            f'in be either \"horizontal\" or \"vertical\". got {direction}.'\n        assert isinstance(max_shear_magnitude, float), 'max_shear_magnitude ' \\\n            f'should be type float. got {type(max_shear_magnitude)}.'\n        assert 0. <= max_shear_magnitude <= 1., 'Defaultly ' \\\n            'max_shear_magnitude should be in range [0,1]. 
' \\\n            f'got {max_shear_magnitude}.'\n        self.level = level\n        self.magnitude = level_to_value(level, max_shear_magnitude)\n        self.img_fill_val = img_fill_val\n        self.seg_ignore_label = seg_ignore_label\n        self.prob = prob\n        self.direction = direction\n        self.max_shear_magnitude = max_shear_magnitude\n        self.random_negative_prob = random_negative_prob\n        self.interpolation = interpolation\n\n    def _shear_img(self,\n                   results,\n                   magnitude,\n                   direction='horizontal',\n                   interpolation='bilinear'):\n        \"\"\"Shear the image.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n            magnitude (int | float): The magnitude used for shear.\n            direction (str): The direction for shear, either \"horizontal\"\n                or \"vertical\".\n            interpolation (str): Same as in :func:`mmcv.imshear`.\n        \"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key]\n            img_sheared = mmcv.imshear(\n                img,\n                magnitude,\n                direction,\n                border_value=self.img_fill_val,\n                interpolation=interpolation)\n            results[key] = img_sheared.astype(img.dtype)\n            results['img_shape'] = results[key].shape\n\n    def _shear_bboxes(self, results, magnitude):\n        \"\"\"Shear the bboxes.\"\"\"\n        h, w, c = results['img_shape']\n        if self.direction == 'horizontal':\n            shear_matrix = np.stack([[1, magnitude],\n                                     [0, 1]]).astype(np.float32)  # [2, 2]\n        else:\n            shear_matrix = np.stack([[1, 0], [magnitude,\n                                              1]]).astype(np.float32)\n        for key in results.get('bbox_fields', []):\n            min_x, min_y, max_x, max_y = np.split(\n                results[key], results[key].shape[-1], axis=-1)\n            coordinates = np.stack([[min_x, min_y], [max_x, min_y],\n                                    [min_x, max_y],\n                                    [max_x, max_y]])  # [4, 2, nb_box, 1]\n            coordinates = coordinates[..., 0].transpose(\n                (2, 1, 0)).astype(np.float32)  # [nb_box, 2, 4]\n            new_coords = np.matmul(shear_matrix[None, :, :],\n                                   coordinates)  # [nb_box, 2, 4]\n            min_x = np.min(new_coords[:, 0, :], axis=-1)\n            min_y = np.min(new_coords[:, 1, :], axis=-1)\n            max_x = np.max(new_coords[:, 0, :], axis=-1)\n            max_y = np.max(new_coords[:, 1, :], axis=-1)\n            min_x = np.clip(min_x, a_min=0, a_max=w)\n            min_y = np.clip(min_y, a_min=0, a_max=h)\n            max_x = np.clip(max_x, a_min=min_x, a_max=w)\n            max_y = np.clip(max_y, a_min=min_y, a_max=h)\n            results[key] = np.stack([min_x, min_y, max_x, max_y],\n                                    axis=-1).astype(results[key].dtype)\n\n    def _shear_masks(self,\n                     results,\n                     magnitude,\n                     direction='horizontal',\n                     fill_val=0,\n                     interpolation='bilinear'):\n        \"\"\"Shear the masks.\"\"\"\n        h, w, c = results['img_shape']\n        for key in results.get('mask_fields', []):\n            masks = results[key]\n            results[key] = masks.shear((h, w),\n                               
        magnitude,\n                                       direction,\n                                       border_value=fill_val,\n                                       interpolation=interpolation)\n\n    def _shear_seg(self,\n                   results,\n                   magnitude,\n                   direction='horizontal',\n                   fill_val=255,\n                   interpolation='bilinear'):\n        \"\"\"Shear the segmentation maps.\"\"\"\n        for key in results.get('seg_fields', []):\n            seg = results[key]\n            results[key] = mmcv.imshear(\n                seg,\n                magnitude,\n                direction,\n                border_value=fill_val,\n                interpolation=interpolation).astype(seg.dtype)\n\n    def _filter_invalid(self, results, min_bbox_size=0):\n        \"\"\"Filter bboxes and corresponding masks too small after shear\n        augmentation.\"\"\"\n        bbox2label, bbox2mask, _ = bbox2fields()\n        for key in results.get('bbox_fields', []):\n            bbox_w = results[key][:, 2] - results[key][:, 0]\n            bbox_h = results[key][:, 3] - results[key][:, 1]\n            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)\n            valid_inds = np.nonzero(valid_inds)[0]\n            results[key] = results[key][valid_inds]\n            # label fields. e.g. gt_labels and gt_labels_ignore\n            label_key = bbox2label.get(key)\n            if label_key in results:\n                results[label_key] = results[label_key][valid_inds]\n            # mask fields, e.g. gt_masks and gt_masks_ignore\n            mask_key = bbox2mask.get(key)\n            if mask_key in results:\n                results[mask_key] = results[mask_key][valid_inds]\n\n    def __call__(self, results):\n        \"\"\"Call function to shear images, bounding boxes, masks and semantic\n        segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Sheared results.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        magnitude = random_negative(self.magnitude, self.random_negative_prob)\n        self._shear_img(results, magnitude, self.direction, self.interpolation)\n        self._shear_bboxes(results, magnitude)\n        # fill_val set to 0 for background of mask.\n        self._shear_masks(\n            results,\n            magnitude,\n            self.direction,\n            fill_val=0,\n            interpolation=self.interpolation)\n        self._shear_seg(\n            results,\n            magnitude,\n            self.direction,\n            fill_val=self.seg_ignore_label,\n            interpolation=self.interpolation)\n        self._filter_invalid(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(level={self.level}, '\n        repr_str += f'img_fill_val={self.img_fill_val}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '\n        repr_str += f'prob={self.prob}, '\n        repr_str += f'direction={self.direction}, '\n        repr_str += f'max_shear_magnitude={self.max_shear_magnitude}, '\n        repr_str += f'random_negative_prob={self.random_negative_prob}, '\n        repr_str += f'interpolation={self.interpolation})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Rotate:\n    \"\"\"Apply Rotate Transformation to image (and its corresponding bbox, mask,\n    
segmentation).\n\n    Args:\n        level (int | float): The level should be in range (0,_MAX_LEVEL].\n        scale (int | float): Isotropic scale factor. Same in\n            ``mmcv.imrotate``.\n        center (int | float | tuple[float]): Center point (w, h) of the\n            rotation in the source image. If None, the center of the\n            image will be used. Same in ``mmcv.imrotate``.\n        img_fill_val (int | float | tuple): The fill value for image border.\n            If float, the same value will be used for all the three\n            channels of image. If tuple, the should be 3 elements (e.g.\n            equals the number of channels for image).\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Default 255.\n        prob (float): The probability for perform transformation and\n            should be in range 0 to 1.\n        max_rotate_angle (int | float): The maximum angles for rotate\n            transformation.\n        random_negative_prob (float): The probability that turns the\n             offset negative.\n    \"\"\"\n\n    def __init__(self,\n                 level,\n                 scale=1,\n                 center=None,\n                 img_fill_val=128,\n                 seg_ignore_label=255,\n                 prob=0.5,\n                 max_rotate_angle=30,\n                 random_negative_prob=0.5):\n        assert isinstance(level, (int, float)), \\\n            f'The level must be type int or float. got {type(level)}.'\n        assert 0 <= level <= _MAX_LEVEL, \\\n            f'The level should be in range (0,{_MAX_LEVEL}]. got {level}.'\n        assert isinstance(scale, (int, float)), \\\n            f'The scale must be type int or float. got type {type(scale)}.'\n        if isinstance(center, (int, float)):\n            center = (center, center)\n        elif isinstance(center, tuple):\n            assert len(center) == 2, 'center with type tuple must have '\\\n                f'2 elements. got {len(center)} elements.'\n        else:\n            assert center is None, 'center must be None or type int, '\\\n                f'float or tuple, got type {type(center)}.'\n        if isinstance(img_fill_val, (float, int)):\n            img_fill_val = tuple([float(img_fill_val)] * 3)\n        elif isinstance(img_fill_val, tuple):\n            assert len(img_fill_val) == 3, 'img_fill_val as tuple must '\\\n                f'have 3 elements. got {len(img_fill_val)}.'\n            img_fill_val = tuple([float(val) for val in img_fill_val])\n        else:\n            raise ValueError(\n                'img_fill_val must be float or tuple with 3 elements.')\n        assert np.all([0 <= val <= 255 for val in img_fill_val]), \\\n            'all elements of img_fill_val should between range [0,255]. '\\\n            f'got {img_fill_val}.'\n        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. '\\\n            f'got {prob}.'\n        assert isinstance(max_rotate_angle, (int, float)), 'max_rotate_angle '\\\n            f'should be type int or float. got type {type(max_rotate_angle)}.'\n        self.level = level\n        self.scale = scale\n        # Rotation angle in degrees. 
Positive values mean\n        # clockwise rotation.\n        self.angle = level_to_value(level, max_rotate_angle)\n        self.center = center\n        self.img_fill_val = img_fill_val\n        self.seg_ignore_label = seg_ignore_label\n        self.prob = prob\n        self.max_rotate_angle = max_rotate_angle\n        self.random_negative_prob = random_negative_prob\n\n    def _rotate_img(self, results, angle, center=None, scale=1.0):\n        \"\"\"Rotate the image.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n            angle (float): Rotation angle in degrees, positive values\n                mean clockwise rotation. Same in ``mmcv.imrotate``.\n            center (tuple[float], optional): Center point (w, h) of the\n                rotation. Same in ``mmcv.imrotate``.\n            scale (int | float): Isotropic scale factor. Same in\n                ``mmcv.imrotate``.\n        \"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key].copy()\n            img_rotated = mmcv.imrotate(\n                img, angle, center, scale, border_value=self.img_fill_val)\n            results[key] = img_rotated.astype(img.dtype)\n            results['img_shape'] = results[key].shape\n\n    def _rotate_bboxes(self, results, rotate_matrix):\n        \"\"\"Rotate the bboxes.\"\"\"\n        h, w, c = results['img_shape']\n        for key in results.get('bbox_fields', []):\n            min_x, min_y, max_x, max_y = np.split(\n                results[key], results[key].shape[-1], axis=-1)\n            coordinates = np.stack([[min_x, min_y], [max_x, min_y],\n                                    [min_x, max_y],\n                                    [max_x, max_y]])  # [4, 2, nb_bbox, 1]\n            # pad 1 to convert from format [x, y] to homogeneous\n            # coordinates format [x, y, 1]\n            coordinates = np.concatenate(\n                (coordinates,\n                 np.ones((4, 1, coordinates.shape[2], 1), coordinates.dtype)),\n                axis=1)  # [4, 3, nb_bbox, 1]\n            coordinates = coordinates.transpose(\n                (2, 0, 1, 3))  # [nb_bbox, 4, 3, 1]\n            rotated_coords = np.matmul(rotate_matrix,\n                                       coordinates)  # [nb_bbox, 4, 2, 1]\n            rotated_coords = rotated_coords[..., 0]  # [nb_bbox, 4, 2]\n            min_x, min_y = np.min(\n                rotated_coords[:, :, 0], axis=1), np.min(\n                    rotated_coords[:, :, 1], axis=1)\n            max_x, max_y = np.max(\n                rotated_coords[:, :, 0], axis=1), np.max(\n                    rotated_coords[:, :, 1], axis=1)\n            min_x, min_y = np.clip(\n                min_x, a_min=0, a_max=w), np.clip(\n                    min_y, a_min=0, a_max=h)\n            max_x, max_y = np.clip(\n                max_x, a_min=min_x, a_max=w), np.clip(\n                    max_y, a_min=min_y, a_max=h)\n            results[key] = np.stack([min_x, min_y, max_x, max_y],\n                                    axis=-1).astype(results[key].dtype)\n\n    def _rotate_masks(self,\n                      results,\n                      angle,\n                      center=None,\n                      scale=1.0,\n                      fill_val=0):\n        \"\"\"Rotate the masks.\"\"\"\n        h, w, c = results['img_shape']\n        for key in results.get('mask_fields', []):\n            masks = results[key]\n            results[key] = masks.rotate((h, w), angle, center, scale, fill_val)\n\n    
def _rotate_seg(self,\n                    results,\n                    angle,\n                    center=None,\n                    scale=1.0,\n                    fill_val=255):\n        \"\"\"Rotate the segmentation map.\"\"\"\n        for key in results.get('seg_fields', []):\n            seg = results[key].copy()\n            results[key] = mmcv.imrotate(\n                seg, angle, center, scale,\n                border_value=fill_val).astype(seg.dtype)\n\n    def _filter_invalid(self, results, min_bbox_size=0):\n        \"\"\"Filter bboxes and corresponding masks too small after rotate\n        augmentation.\"\"\"\n        bbox2label, bbox2mask, _ = bbox2fields()\n        for key in results.get('bbox_fields', []):\n            bbox_w = results[key][:, 2] - results[key][:, 0]\n            bbox_h = results[key][:, 3] - results[key][:, 1]\n            valid_inds = (bbox_w > min_bbox_size) & (bbox_h > min_bbox_size)\n            valid_inds = np.nonzero(valid_inds)[0]\n            results[key] = results[key][valid_inds]\n            # label fields. e.g. gt_labels and gt_labels_ignore\n            label_key = bbox2label.get(key)\n            if label_key in results:\n                results[label_key] = results[label_key][valid_inds]\n            # mask fields, e.g. gt_masks and gt_masks_ignore\n            mask_key = bbox2mask.get(key)\n            if mask_key in results:\n                results[mask_key] = results[mask_key][valid_inds]\n\n    def __call__(self, results):\n        \"\"\"Call function to rotate images, bounding boxes, masks and semantic\n        segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Rotated results.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        h, w = results['img'].shape[:2]\n        center = self.center\n        if center is None:\n            center = ((w - 1) * 0.5, (h - 1) * 0.5)\n        angle = random_negative(self.angle, self.random_negative_prob)\n        self._rotate_img(results, angle, center, self.scale)\n        rotate_matrix = cv2.getRotationMatrix2D(center, -angle, self.scale)\n        self._rotate_bboxes(results, rotate_matrix)\n        self._rotate_masks(results, angle, center, self.scale, fill_val=0)\n        self._rotate_seg(\n            results, angle, center, self.scale, fill_val=self.seg_ignore_label)\n        self._filter_invalid(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(level={self.level}, '\n        repr_str += f'scale={self.scale}, '\n        repr_str += f'center={self.center}, '\n        repr_str += f'img_fill_val={self.img_fill_val}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label}, '\n        repr_str += f'prob={self.prob}, '\n        repr_str += f'max_rotate_angle={self.max_rotate_angle}, '\n        repr_str += f'random_negative_prob={self.random_negative_prob})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Translate:\n    \"\"\"Translate the images, bboxes, masks and segmentation maps horizontally\n    or vertically.\n\n    Args:\n        level (int | float): The level for Translate and should be in\n            range [0,_MAX_LEVEL].\n        prob (float): The probability for performing translation and\n            should be in range [0, 1].\n        img_fill_val (int | float | tuple): The filled value for image\n            border. 
If float, the same fill value will be used for all\n            the three channels of image. If tuple, the should be 3\n            elements (e.g. equals the number of channels for image).\n        seg_ignore_label (int): The fill value used for segmentation map.\n            Note this value must equals ``ignore_label`` in ``semantic_head``\n            of the corresponding config. Default 255.\n        direction (str): The translate direction, either \"horizontal\"\n            or \"vertical\".\n        max_translate_offset (int | float): The maximum pixel's offset for\n            Translate.\n        random_negative_prob (float): The probability that turns the\n            offset negative.\n        min_size (int | float): The minimum pixel for filtering\n            invalid bboxes after the translation.\n    \"\"\"\n\n    def __init__(self,\n                 level,\n                 prob=0.5,\n                 img_fill_val=128,\n                 seg_ignore_label=255,\n                 direction='horizontal',\n                 max_translate_offset=250.,\n                 random_negative_prob=0.5,\n                 min_size=0):\n        assert isinstance(level, (int, float)), \\\n            'The level must be type int or float.'\n        assert 0 <= level <= _MAX_LEVEL, \\\n            'The level used for calculating Translate\\'s offset should be ' \\\n            'in range [0,_MAX_LEVEL]'\n        assert 0 <= prob <= 1.0, \\\n            'The probability of translation should be in range [0, 1].'\n        if isinstance(img_fill_val, (float, int)):\n            img_fill_val = tuple([float(img_fill_val)] * 3)\n        elif isinstance(img_fill_val, tuple):\n            assert len(img_fill_val) == 3, \\\n                'img_fill_val as tuple must have 3 elements.'\n            img_fill_val = tuple([float(val) for val in img_fill_val])\n        else:\n            raise ValueError('img_fill_val must be type float or tuple.')\n        assert np.all([0 <= val <= 255 for val in img_fill_val]), \\\n            'all elements of img_fill_val should between range [0,255].'\n        assert direction in ('horizontal', 'vertical'), \\\n            'direction should be \"horizontal\" or \"vertical\".'\n        assert isinstance(max_translate_offset, (int, float)), \\\n            'The max_translate_offset must be type int or float.'\n        # the offset used for translation\n        self.offset = int(level_to_value(level, max_translate_offset))\n        self.level = level\n        self.prob = prob\n        self.img_fill_val = img_fill_val\n        self.seg_ignore_label = seg_ignore_label\n        self.direction = direction\n        self.max_translate_offset = max_translate_offset\n        self.random_negative_prob = random_negative_prob\n        self.min_size = min_size\n\n    def _translate_img(self, results, offset, direction='horizontal'):\n        \"\"\"Translate the image.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n            offset (int | float): The offset for translate.\n            direction (str): The translate direction, either \"horizontal\"\n                or \"vertical\".\n        \"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key].copy()\n            results[key] = mmcv.imtranslate(\n                img, offset, direction, self.img_fill_val).astype(img.dtype)\n            results['img_shape'] = results[key].shape\n\n    def _translate_bboxes(self, results, offset):\n        \"\"\"Shift bboxes 
horizontally or vertically, according to offset.\"\"\"\n        h, w, c = results['img_shape']\n        for key in results.get('bbox_fields', []):\n            min_x, min_y, max_x, max_y = np.split(\n                results[key], results[key].shape[-1], axis=-1)\n            if self.direction == 'horizontal':\n                min_x = np.maximum(0, min_x + offset)\n                max_x = np.minimum(w, max_x + offset)\n            elif self.direction == 'vertical':\n                min_y = np.maximum(0, min_y + offset)\n                max_y = np.minimum(h, max_y + offset)\n\n            # the boxes translated outside of image will be filtered along with\n            # the corresponding masks, by invoking ``_filter_invalid``.\n            results[key] = np.concatenate([min_x, min_y, max_x, max_y],\n                                          axis=-1)\n\n    def _translate_masks(self,\n                         results,\n                         offset,\n                         direction='horizontal',\n                         fill_val=0):\n        \"\"\"Translate masks horizontally or vertically.\"\"\"\n        h, w, c = results['img_shape']\n        for key in results.get('mask_fields', []):\n            masks = results[key]\n            results[key] = masks.translate((h, w), offset, direction, fill_val)\n\n    def _translate_seg(self,\n                       results,\n                       offset,\n                       direction='horizontal',\n                       fill_val=255):\n        \"\"\"Translate segmentation maps horizontally or vertically.\"\"\"\n        for key in results.get('seg_fields', []):\n            seg = results[key].copy()\n            results[key] = mmcv.imtranslate(seg, offset, direction,\n                                            fill_val).astype(seg.dtype)\n\n    def _filter_invalid(self, results, min_size=0):\n        \"\"\"Filter bboxes and masks too small or translated out of image.\"\"\"\n        bbox2label, bbox2mask, _ = bbox2fields()\n        for key in results.get('bbox_fields', []):\n            bbox_w = results[key][:, 2] - results[key][:, 0]\n            bbox_h = results[key][:, 3] - results[key][:, 1]\n            valid_inds = (bbox_w > min_size) & (bbox_h > min_size)\n            valid_inds = np.nonzero(valid_inds)[0]\n            results[key] = results[key][valid_inds]\n            # label fields. e.g. gt_labels and gt_labels_ignore\n            label_key = bbox2label.get(key)\n            if label_key in results:\n                results[label_key] = results[label_key][valid_inds]\n            # mask fields, e.g. 
gt_masks and gt_masks_ignore\n            mask_key = bbox2mask.get(key)\n            if mask_key in results:\n                results[mask_key] = results[mask_key][valid_inds]\n        return results\n\n    def __call__(self, results):\n        \"\"\"Call function to translate images, bounding boxes, masks and\n        semantic segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Translated results.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        offset = random_negative(self.offset, self.random_negative_prob)\n        self._translate_img(results, offset, self.direction)\n        self._translate_bboxes(results, offset)\n        # fill_val defaultly 0 for BitmapMasks and None for PolygonMasks.\n        self._translate_masks(results, offset, self.direction)\n        # fill_val set to ``seg_ignore_label`` for the ignored value\n        # of segmentation map.\n        self._translate_seg(\n            results, offset, self.direction, fill_val=self.seg_ignore_label)\n        self._filter_invalid(results, min_size=self.min_size)\n        return results\n\n\n@PIPELINES.register_module()\nclass ColorTransform:\n    \"\"\"Apply Color transformation to image. The bboxes, masks, and\n    segmentations are not modified.\n\n    Args:\n        level (int | float): Should be in range [0,_MAX_LEVEL].\n        prob (float): The probability for performing Color transformation.\n    \"\"\"\n\n    def __init__(self, level, prob=0.5):\n        assert isinstance(level, (int, float)), \\\n            'The level must be type int or float.'\n        assert 0 <= level <= _MAX_LEVEL, \\\n            'The level should be in range [0,_MAX_LEVEL].'\n        assert 0 <= prob <= 1.0, \\\n            'The probability should be in range [0,1].'\n        self.level = level\n        self.prob = prob\n        self.factor = enhance_level_to_value(level)\n\n    def _adjust_color_img(self, results, factor=1.0):\n        \"\"\"Apply Color transformation to image.\"\"\"\n        for key in results.get('img_fields', ['img']):\n            # NOTE defaultly the image should be BGR format\n            img = results[key]\n            results[key] = mmcv.adjust_color(img, factor).astype(img.dtype)\n\n    def __call__(self, results):\n        \"\"\"Call function for Color transformation.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Colored results.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        self._adjust_color_img(results, self.factor)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(level={self.level}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass EqualizeTransform:\n    \"\"\"Apply Equalize transformation to image. 
The bboxes, masks and\n    segmentations are not modified.\n\n    Args:\n        prob (float): The probability for performing Equalize transformation.\n    \"\"\"\n\n    def __init__(self, prob=0.5):\n        assert 0 <= prob <= 1.0, \\\n            'The probability should be in range [0,1].'\n        self.prob = prob\n\n    def _imequalize(self, results):\n        \"\"\"Equalizes the histogram of one image.\"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key]\n            results[key] = mmcv.imequalize(img).astype(img.dtype)\n\n    def __call__(self, results):\n        \"\"\"Call function for Equalize transformation.\n\n        Args:\n            results (dict): Results dict from loading pipeline.\n\n        Returns:\n            dict: Results after the transformation.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        self._imequalize(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(prob={self.prob})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass BrightnessTransform:\n    \"\"\"Apply Brightness transformation to image. The bboxes, masks and\n    segmentations are not modified.\n\n    Args:\n        level (int | float): Should be in range [0,_MAX_LEVEL].\n        prob (float): The probability for performing Brightness transformation.\n    \"\"\"\n\n    def __init__(self, level, prob=0.5):\n        assert isinstance(level, (int, float)), \\\n            'The level must be type int or float.'\n        assert 0 <= level <= _MAX_LEVEL, \\\n            'The level should be in range [0,_MAX_LEVEL].'\n        assert 0 <= prob <= 1.0, \\\n            'The probability should be in range [0,1].'\n        self.level = level\n        self.prob = prob\n        self.factor = enhance_level_to_value(level)\n\n    def _adjust_brightness_img(self, results, factor=1.0):\n        \"\"\"Adjust the brightness of image.\"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key]\n            results[key] = mmcv.adjust_brightness(img,\n                                                  factor).astype(img.dtype)\n\n    def __call__(self, results):\n        \"\"\"Call function for Brightness transformation.\n\n        Args:\n            results (dict): Results dict from loading pipeline.\n\n        Returns:\n            dict: Results after the transformation.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        self._adjust_brightness_img(results, self.factor)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(level={self.level}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass ContrastTransform:\n    \"\"\"Apply Contrast transformation to image. 
The bboxes, masks and\n    segmentations are not modified.\n\n    Args:\n        level (int | float): Should be in range [0,_MAX_LEVEL].\n        prob (float): The probability for performing Contrast transformation.\n    \"\"\"\n\n    def __init__(self, level, prob=0.5):\n        assert isinstance(level, (int, float)), \\\n            'The level must be type int or float.'\n        assert 0 <= level <= _MAX_LEVEL, \\\n            'The level should be in range [0,_MAX_LEVEL].'\n        assert 0 <= prob <= 1.0, \\\n            'The probability should be in range [0,1].'\n        self.level = level\n        self.prob = prob\n        self.factor = enhance_level_to_value(level)\n\n    def _adjust_contrast_img(self, results, factor=1.0):\n        \"\"\"Adjust the image contrast.\"\"\"\n        for key in results.get('img_fields', ['img']):\n            img = results[key]\n            results[key] = mmcv.adjust_contrast(img, factor).astype(img.dtype)\n\n    def __call__(self, results):\n        \"\"\"Call function for Contrast transformation.\n\n        Args:\n            results (dict): Results dict from loading pipeline.\n\n        Returns:\n            dict: Results after the transformation.\n        \"\"\"\n        if np.random.rand() > self.prob:\n            return results\n        self._adjust_contrast_img(results, self.factor)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(level={self.level}, '\n        repr_str += f'prob={self.prob})'\n        return repr_str\n"
  },
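A quick usage sketch for the color-style transforms defined above (ColorTransform, EqualizeTransform and friends). This is only an illustration under the assumption that mmdet and mmcv are installed; the random array stands in for a real BGR image that LoadImageFromFile would normally provide.

import numpy as np
from mmdet.datasets.pipelines import ColorTransform, EqualizeTransform

# Stand-in for a loaded BGR image; real pipelines fill 'img' and
# 'img_fields' during the loading stage.
results = dict(
    img=np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8),
    img_fields=['img'])

# prob=1.0 guarantees the transform is applied; level is mapped to an
# enhancement factor by enhance_level_to_value inside the class.
results = ColorTransform(level=5, prob=1.0)(results)
results = EqualizeTransform(prob=1.0)(results)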
  {
    "path": "mmdet/datasets/pipelines/compose.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport collections\n\nfrom mmcv.utils import build_from_cfg\n\nfrom ..builder import PIPELINES\n\n\n@PIPELINES.register_module()\nclass Compose:\n    \"\"\"Compose multiple transforms sequentially.\n\n    Args:\n        transforms (Sequence[dict | callable]): Sequence of transform object or\n            config dict to be composed.\n    \"\"\"\n\n    def __init__(self, transforms):\n        assert isinstance(transforms, collections.abc.Sequence)\n        self.transforms = []\n        for transform in transforms:\n            if isinstance(transform, dict):\n                transform = build_from_cfg(transform, PIPELINES)\n                self.transforms.append(transform)\n            elif callable(transform):\n                self.transforms.append(transform)\n            else:\n                raise TypeError('transform must be callable or a dict')\n\n    def __call__(self, data):\n        \"\"\"Call function to apply transforms sequentially.\n\n        Args:\n            data (dict): A result dict contains the data to transform.\n\n        Returns:\n           dict: Transformed data.\n        \"\"\"\n\n        for t in self.transforms:\n            data = t(data)\n            if data is None:\n                return None\n        return data\n\n    def __repr__(self):\n        format_string = self.__class__.__name__ + '('\n        for t in self.transforms:\n            str_ = t.__repr__()\n            if 'Compose(' in str_:\n                str_ = str_.replace('\\n', '\\n    ')\n            format_string += '\\n'\n            format_string += f'    {str_}'\n        format_string += '\\n)'\n        return format_string\n"
  },
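To show how ``Compose`` resolves config dicts against the ``PIPELINES`` registry, here is a minimal sketch of a typical training pipeline; the transform arguments below are illustrative, not a recommended configuration.

from mmdet.datasets.pipelines import Compose

# Dicts are built via build_from_cfg against PIPELINES; callables are kept as-is.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = Compose([
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
])
# The composed object is applied to the results dict prepared by the dataset;
# if any transform returns None, the whole call returns None.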
  {
    "path": "mmdet/datasets/pipelines/formating.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# flake8: noqa\nimport warnings\n\nfrom .formatting import *\n\nwarnings.warn('DeprecationWarning: mmdet.datasets.pipelines.formating will be '\n              'deprecated, please replace it with '\n              'mmdet.datasets.pipelines.formatting.')\n"
  },
  {
    "path": "mmdet/datasets/pipelines/formatting.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom mmcv.parallel import DataContainer as DC\n\nfrom ..builder import PIPELINES\n\n\ndef to_tensor(data):\n    \"\"\"Convert objects of various python types to :obj:`torch.Tensor`.\n\n    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,\n    :class:`Sequence`, :class:`int` and :class:`float`.\n\n    Args:\n        data (torch.Tensor | numpy.ndarray | Sequence | int | float): Data to\n            be converted.\n    \"\"\"\n\n    if isinstance(data, torch.Tensor):\n        return data\n    elif isinstance(data, np.ndarray):\n        return torch.from_numpy(data)\n    elif isinstance(data, Sequence) and not mmcv.is_str(data):\n        return torch.tensor(data)\n    elif isinstance(data, int):\n        return torch.LongTensor([data])\n    elif isinstance(data, float):\n        return torch.FloatTensor([data])\n    else:\n        raise TypeError(f'type {type(data)} cannot be converted to tensor.')\n\n\n@PIPELINES.register_module()\nclass ToTensor:\n    \"\"\"Convert some results to :obj:`torch.Tensor` by given keys.\n\n    Args:\n        keys (Sequence[str]): Keys that need to be converted to Tensor.\n    \"\"\"\n\n    def __init__(self, keys):\n        self.keys = keys\n\n    def __call__(self, results):\n        \"\"\"Call function to convert data in results to :obj:`torch.Tensor`.\n\n        Args:\n            results (dict): Result dict contains the data to convert.\n\n        Returns:\n            dict: The result dict contains the data converted\n                to :obj:`torch.Tensor`.\n        \"\"\"\n        for key in self.keys:\n            results[key] = to_tensor(results[key])\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(keys={self.keys})'\n\n\n@PIPELINES.register_module()\nclass ImageToTensor:\n    \"\"\"Convert image to :obj:`torch.Tensor` by given keys.\n\n    The dimension order of input image is (H, W, C). The pipeline will convert\n    it to (C, H, W). 
If only 2 dimension (H, W) is given, the output would be\n    (1, H, W).\n\n    Args:\n        keys (Sequence[str]): Key of images to be converted to Tensor.\n    \"\"\"\n\n    def __init__(self, keys):\n        self.keys = keys\n\n    def __call__(self, results):\n        \"\"\"Call function to convert image in results to :obj:`torch.Tensor` and\n        permute the channel order.\n\n        Args:\n            results (dict): Result dict contains the image data to convert.\n\n        Returns:\n            dict: The result dict contains the image converted\n                to :obj:`torch.Tensor` and permuted to (C, H, W) order.\n        \"\"\"\n        for key in self.keys:\n            img = results[key]\n            if len(img.shape) < 3:\n                img = np.expand_dims(img, -1)\n            results[key] = to_tensor(img).permute(2, 0, 1).contiguous()\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(keys={self.keys})'\n\n\n@PIPELINES.register_module()\nclass Transpose:\n    \"\"\"Transpose some results by given keys.\n\n    Args:\n        keys (Sequence[str]): Keys of results to be transposed.\n        order (Sequence[int]): Order of transpose.\n    \"\"\"\n\n    def __init__(self, keys, order):\n        self.keys = keys\n        self.order = order\n\n    def __call__(self, results):\n        \"\"\"Call function to transpose the channel order of data in results.\n\n        Args:\n            results (dict): Result dict contains the data to transpose.\n\n        Returns:\n            dict: The result dict contains the data transposed to \\\n                ``self.order``.\n        \"\"\"\n        for key in self.keys:\n            results[key] = results[key].transpose(self.order)\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n               f'(keys={self.keys}, order={self.order})'\n\n\n@PIPELINES.register_module()\nclass ToDataContainer:\n    \"\"\"Convert results to :obj:`mmcv.DataContainer` by given fields.\n\n    Args:\n        fields (Sequence[dict]): Each field is a dict like\n            ``dict(key='xxx', **kwargs)``. 
The ``key`` in result will\n            be converted to :obj:`mmcv.DataContainer` with ``**kwargs``.\n            Default: ``(dict(key='img', stack=True), dict(key='gt_bboxes'),\n            dict(key='gt_labels'))``.\n    \"\"\"\n\n    def __init__(self,\n                 fields=(dict(key='img', stack=True), dict(key='gt_bboxes'),\n                         dict(key='gt_labels'))):\n        self.fields = fields\n\n    def __call__(self, results):\n        \"\"\"Call function to convert data in results to\n        :obj:`mmcv.DataContainer`.\n\n        Args:\n            results (dict): Result dict contains the data to convert.\n\n        Returns:\n            dict: The result dict contains the data converted to \\\n                :obj:`mmcv.DataContainer`.\n        \"\"\"\n\n        for field in self.fields:\n            field = field.copy()\n            key = field.pop('key')\n            results[key] = DC(results[key], **field)\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(fields={self.fields})'\n\n\n@PIPELINES.register_module()\nclass DefaultFormatBundle:\n    \"\"\"Default formatting bundle.\n\n    It simplifies the pipeline of formatting common fields, including \"img\",\n    \"proposals\", \"gt_bboxes\", \"gt_labels\", \"gt_masks\" and \"gt_semantic_seg\".\n    These fields are formatted as follows.\n\n    - img: (1)transpose & to tensor, (2)to DataContainer (stack=True)\n    - proposals: (1)to tensor, (2)to DataContainer\n    - gt_bboxes: (1)to tensor, (2)to DataContainer\n    - gt_bboxes_ignore: (1)to tensor, (2)to DataContainer\n    - gt_labels: (1)to tensor, (2)to DataContainer\n    - gt_masks: (1)to tensor, (2)to DataContainer (cpu_only=True)\n    - gt_semantic_seg: (1)unsqueeze dim-0 (2)to tensor, \\\n                       (3)to DataContainer (stack=True)\n\n    Args:\n        img_to_float (bool): Whether to force the image to be converted to\n            float type. Default: True.\n        pad_val (dict): A dict for padding value in batch collating,\n            the default value is `dict(img=0, masks=0, seg=255)`.\n            Without this argument, the padding value of \"gt_semantic_seg\"\n            will be set to 0 by default, which should be 255.\n    \"\"\"\n\n    def __init__(self,\n                 img_to_float=True,\n                 pad_val=dict(img=0, masks=0, seg=255)):\n        self.img_to_float = img_to_float\n        self.pad_val = pad_val\n\n    def __call__(self, results):\n        \"\"\"Call function to transform and format common fields in results.\n\n        Args:\n            results (dict): Result dict contains the data to convert.\n\n        Returns:\n            dict: The result dict contains the data that is formatted with \\\n                default bundle.\n        \"\"\"\n\n        if 'img' in results:\n            img = results['img']\n            if self.img_to_float is True and img.dtype == np.uint8:\n                # Normally, image is of uint8 type without normalization.\n                # At this time, it needs to be forced to be converted to\n                # flot32, otherwise the model training and inference\n                # will be wrong. 
Only used for YOLOX currently .\n                img = img.astype(np.float32)\n            # add default meta keys\n            results = self._add_default_meta_keys(results)\n            if len(img.shape) < 3:\n                img = np.expand_dims(img, -1)\n            # To improve the computational speed by by 3-5 times, apply:\n            # If image is not contiguous, use\n            # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n            # If image is already contiguous, use\n            # `torch.permute()` followed by `torch.contiguous()`\n            # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n            # for more details\n            if not img.flags.c_contiguous:\n                img = np.ascontiguousarray(img.transpose(2, 0, 1))\n                img = to_tensor(img)\n            else:\n                img = to_tensor(img).permute(2, 0, 1).contiguous()\n            results['img'] = DC(\n                img, padding_value=self.pad_val['img'], stack=True)\n        for key in ['proposals', 'gt_bboxes', 'gt_bboxes_ignore', 'gt_labels']:\n            if key not in results:\n                continue\n            results[key] = DC(to_tensor(results[key]))\n        if 'gt_masks' in results:\n            results['gt_masks'] = DC(\n                results['gt_masks'],\n                padding_value=self.pad_val['masks'],\n                cpu_only=True)\n        if 'gt_semantic_seg' in results:\n            results['gt_semantic_seg'] = DC(\n                to_tensor(results['gt_semantic_seg'][None, ...]),\n                padding_value=self.pad_val['seg'],\n                stack=True)\n        return results\n\n    def _add_default_meta_keys(self, results):\n        \"\"\"Add default meta keys.\n\n        We set default meta keys including `pad_shape`, `scale_factor` and\n        `img_norm_cfg` to avoid the case where no `Resize`, `Normalize` and\n        `Pad` are implemented during the whole pipeline.\n\n        Args:\n            results (dict): Result dict contains the data to convert.\n\n        Returns:\n            results (dict): Updated result dict contains the data to convert.\n        \"\"\"\n        img = results['img']\n        results.setdefault('pad_shape', img.shape)\n        results.setdefault('scale_factor', 1.0)\n        num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n        results.setdefault(\n            'img_norm_cfg',\n            dict(\n                mean=np.zeros(num_channels, dtype=np.float32),\n                std=np.ones(num_channels, dtype=np.float32),\n                to_rgb=False))\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n               f'(img_to_float={self.img_to_float})'\n\n\n@PIPELINES.register_module()\nclass Collect:\n    \"\"\"Collect data from the loader relevant to the specific task.\n\n    This is usually the last stage of the data loader pipeline. Typically keys\n    is set to some subset of \"img\", \"proposals\", \"gt_bboxes\",\n    \"gt_bboxes_ignore\", \"gt_labels\", and/or \"gt_masks\".\n\n    The \"img_meta\" item is always populated.  The contents of the \"img_meta\"\n    dictionary depends on \"meta_keys\". By default this includes:\n\n        - \"img_shape\": shape of the image input to the network as a tuple \\\n            (h, w, c).  
Note that images may be zero padded on the \\\n            bottom/right if the batch tensor is larger than this shape.\n\n        - \"scale_factor\": a float indicating the preprocessing scale\n\n        - \"flip\": a boolean indicating if image flip transform was used\n\n        - \"filename\": path to the image file\n\n        - \"ori_shape\": original shape of the image as a tuple (h, w, c)\n\n        - \"pad_shape\": image shape after padding\n\n        - \"img_norm_cfg\": a dict of normalization information:\n\n            - mean - per channel mean subtraction\n            - std - per channel std divisor\n            - to_rgb - bool indicating if bgr was converted to rgb\n\n    Args:\n        keys (Sequence[str]): Keys of results to be collected in ``data``.\n        meta_keys (Sequence[str], optional): Meta keys to be converted to\n            ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n            Default: ``('filename', 'ori_filename', 'ori_shape', 'img_shape',\n            'pad_shape', 'scale_factor', 'flip', 'flip_direction',\n            'img_norm_cfg')``\n    \"\"\"\n\n    def __init__(self,\n                 keys,\n                 meta_keys=('filename', 'ori_filename', 'ori_shape',\n                            'img_shape', 'pad_shape', 'scale_factor', 'flip',\n                            'flip_direction', 'img_norm_cfg')):\n        self.keys = keys\n        self.meta_keys = meta_keys\n\n    def __call__(self, results):\n        \"\"\"Call function to collect keys in results. The keys in ``meta_keys``\n        will be converted to :obj:mmcv.DataContainer.\n\n        Args:\n            results (dict): Result dict contains the data to collect.\n\n        Returns:\n            dict: The result dict contains the following keys\n\n                - keys in``self.keys``\n                - ``img_metas``\n        \"\"\"\n\n        data = {}\n        img_meta = {}\n        for key in self.meta_keys:\n            img_meta[key] = results[key]\n        data['img_metas'] = DC(img_meta, cpu_only=True)\n        for key in self.keys:\n            data[key] = results[key]\n        return data\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n               f'(keys={self.keys}, meta_keys={self.meta_keys})'\n\n\n@PIPELINES.register_module()\nclass WrapFieldsToLists:\n    \"\"\"Wrap fields of the data dictionary into lists for evaluation.\n\n    This class can be used as a last step of a test or validation\n    pipeline for single image evaluation or inference.\n\n    Example:\n        >>> test_pipeline = [\n        >>>    dict(type='LoadImageFromFile'),\n        >>>    dict(type='Normalize',\n                    mean=[123.675, 116.28, 103.53],\n                    std=[58.395, 57.12, 57.375],\n                    to_rgb=True),\n        >>>    dict(type='Pad', size_divisor=32),\n        >>>    dict(type='ImageToTensor', keys=['img']),\n        >>>    dict(type='Collect', keys=['img']),\n        >>>    dict(type='WrapFieldsToLists')\n        >>> ]\n    \"\"\"\n\n    def __call__(self, results):\n        \"\"\"Call function to wrap fields into lists.\n\n        Args:\n            results (dict): Result dict contains the data to wrap.\n\n        Returns:\n            dict: The result dict where value of ``self.keys`` are wrapped \\\n                into list.\n        \"\"\"\n\n        # Wrap dict fields into lists\n        for key, val in results.items():\n            results[key] = [val]\n        return results\n\n    def __repr__(self):\n        return 
f'{self.__class__.__name__}()'\n"
  },
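The formatting stage is easiest to see end to end on a toy results dict. Below is a minimal sketch, assuming mmdet and mmcv are installed; the filename and shapes are placeholders, and keys such as ``flip`` are filled in by hand only because no earlier transforms ran.

import numpy as np
from mmdet.datasets.pipelines import Collect, DefaultFormatBundle

results = dict(
    img=np.zeros((4, 4, 3), dtype=np.uint8),
    gt_bboxes=np.array([[0., 0., 2., 2.]], dtype=np.float32),
    gt_labels=np.array([1], dtype=np.int64),
    filename='demo.jpg', ori_filename='demo.jpg',  # placeholder names
    ori_shape=(4, 4, 3), img_shape=(4, 4, 3),
    flip=False, flip_direction=None)

# DefaultFormatBundle also fills pad_shape, scale_factor and img_norm_cfg
# via _add_default_meta_keys before wrapping everything in DataContainers.
bundled = DefaultFormatBundle()(results)
data = Collect(keys=['img', 'gt_bboxes', 'gt_labels'])(bundled)
# data['img'] wraps a (C, H, W) float32 tensor; data['img_metas'] is a
# cpu_only DataContainer holding the meta keys listed above.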
  {
    "path": "mmdet/datasets/pipelines/instaboost.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\nfrom ..builder import PIPELINES\n\n\n@PIPELINES.register_module()\nclass InstaBoost:\n    r\"\"\"Data augmentation method in `InstaBoost: Boosting Instance\n    Segmentation Via Probability Map Guided Copy-Pasting\n    <https://arxiv.org/abs/1908.07801>`_.\n\n    Refer to https://github.com/GothicAi/Instaboost for implementation details.\n\n    Args:\n        action_candidate (tuple): Action candidates. \"normal\", \"horizontal\", \\\n            \"vertical\", \"skip\" are supported. Default: ('normal', \\\n            'horizontal', 'skip').\n        action_prob (tuple): Corresponding action probabilities. Should be \\\n            the same length as action_candidate. Default: (1, 0, 0).\n        scale (tuple): (min scale, max scale). Default: (0.8, 1.2).\n        dx (int): The maximum x-axis shift will be (instance width) / dx.\n            Default 15.\n        dy (int): The maximum y-axis shift will be (instance height) / dy.\n            Default 15.\n        theta (tuple): (min rotation degree, max rotation degree). \\\n            Default: (-1, 1).\n        color_prob (float): Probability of images for color augmentation.\n            Default 0.5.\n        heatmap_flag (bool): Whether to use heatmap guided. Default False.\n        aug_ratio (float): Probability of applying this transformation. \\\n            Default 0.5.\n    \"\"\"\n\n    def __init__(self,\n                 action_candidate=('normal', 'horizontal', 'skip'),\n                 action_prob=(1, 0, 0),\n                 scale=(0.8, 1.2),\n                 dx=15,\n                 dy=15,\n                 theta=(-1, 1),\n                 color_prob=0.5,\n                 hflag=False,\n                 aug_ratio=0.5):\n        try:\n            import instaboostfast as instaboost\n        except ImportError:\n            raise ImportError(\n                'Please run \"pip install instaboostfast\" '\n                'to install instaboostfast first for instaboost augmentation.')\n        self.cfg = instaboost.InstaBoostConfig(action_candidate, action_prob,\n                                               scale, dx, dy, theta,\n                                               color_prob, hflag)\n        self.aug_ratio = aug_ratio\n\n    def _load_anns(self, results):\n        labels = results['ann_info']['labels']\n        masks = results['ann_info']['masks']\n        bboxes = results['ann_info']['bboxes']\n        n = len(labels)\n\n        anns = []\n        for i in range(n):\n            label = labels[i]\n            bbox = bboxes[i]\n            mask = masks[i]\n            x1, y1, x2, y2 = bbox\n            # assert (x2 - x1) >= 1 and (y2 - y1) >= 1\n            bbox = [x1, y1, x2 - x1, y2 - y1]\n            anns.append({\n                'category_id': label,\n                'segmentation': mask,\n                'bbox': bbox\n            })\n\n        return anns\n\n    def _parse_anns(self, results, anns, img):\n        gt_bboxes = []\n        gt_labels = []\n        gt_masks_ann = []\n        for ann in anns:\n            x1, y1, w, h = ann['bbox']\n            # TODO: more essential bug need to be fixed in instaboost\n            if w <= 0 or h <= 0:\n                continue\n            bbox = [x1, y1, x1 + w, y1 + h]\n            gt_bboxes.append(bbox)\n            gt_labels.append(ann['category_id'])\n            gt_masks_ann.append(ann['segmentation'])\n        gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n        
gt_labels = np.array(gt_labels, dtype=np.int64)\n        results['ann_info']['labels'] = gt_labels\n        results['ann_info']['bboxes'] = gt_bboxes\n        results['ann_info']['masks'] = gt_masks_ann\n        results['img'] = img\n        return results\n\n    def __call__(self, results):\n        img = results['img']\n        ori_type = img.dtype\n        anns = self._load_anns(results)\n        if np.random.choice([0, 1], p=[1 - self.aug_ratio, self.aug_ratio]):\n            try:\n                import instaboostfast as instaboost\n            except ImportError:\n                raise ImportError('Please run \"pip install instaboostfast\" '\n                                  'to install instaboostfast first.')\n            anns, img = instaboost.get_new_data(\n                anns, img.astype(np.uint8), self.cfg, background=None)\n\n        results = self._parse_anns(results, anns, img.astype(ori_type))\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(cfg={self.cfg}, aug_ratio={self.aug_ratio})'\n        return repr_str\n"
  },
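For context, ``InstaBoost`` is typically placed right after image loading and before ``LoadAnnotations``, since it rewrites both ``results['img']`` and ``results['ann_info']``. A hedged config sketch follows; the values simply restate the defaults shown above, and ``pip install instaboostfast`` is required.

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='InstaBoost',
        action_candidate=('normal', 'horizontal', 'skip'),
        action_prob=(1, 0, 0),
        scale=(0.8, 1.2),
        dx=15,
        dy=15,
        theta=(-1, 1),
        color_prob=0.5,
        hflag=False,
        aug_ratio=0.5),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # ... remaining transforms (Resize, RandomFlip, formatting) follow here.
]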
  {
    "path": "mmdet/datasets/pipelines/loading.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as maskUtils\n\nfrom mmdet.core import BitmapMasks, PolygonMasks\nfrom ..builder import PIPELINES\n\ntry:\n    from panopticapi.utils import rgb2id\nexcept ImportError:\n    rgb2id = None\n\n\n@PIPELINES.register_module()\nclass LoadImageFromFile:\n    \"\"\"Load an image from file.\n\n    Required keys are \"img_prefix\" and \"img_info\" (a dict that must contain the\n    key \"filename\"). Added or updated keys are \"filename\", \"img\", \"img_shape\",\n    \"ori_shape\" (same as `img_shape`), \"pad_shape\" (same as `img_shape`),\n    \"scale_factor\" (1.0) and \"img_norm_cfg\" (means=0 and stds=1).\n\n    Args:\n        to_float32 (bool): Whether to convert the loaded image to a float32\n            numpy array. If set to False, the loaded image is an uint8 array.\n            Defaults to False.\n        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.\n            Defaults to 'color'.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmcv.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 to_float32=False,\n                 color_type='color',\n                 channel_order='bgr',\n                 file_client_args=dict(backend='disk')):\n        self.to_float32 = to_float32\n        self.color_type = color_type\n        self.channel_order = channel_order\n        self.file_client_args = file_client_args.copy()\n        self.file_client = None\n\n    def __call__(self, results):\n        \"\"\"Call functions to load image and get image meta information.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded image and meta information.\n        \"\"\"\n\n        if self.file_client is None:\n            self.file_client = mmcv.FileClient(**self.file_client_args)\n\n        if results['img_prefix'] is not None:\n            filename = osp.join(results['img_prefix'],\n                                results['img_info']['filename'])\n        else:\n            filename = results['img_info']['filename']\n\n        img_bytes = self.file_client.get(filename)\n        img = mmcv.imfrombytes(\n            img_bytes, flag=self.color_type, channel_order=self.channel_order)\n        if self.to_float32:\n            img = img.astype(np.float32)\n\n        results['filename'] = filename\n        results['ori_filename'] = results['img_info']['filename']\n        results['img'] = img\n        results['img_shape'] = img.shape\n        results['ori_shape'] = img.shape\n        results['img_fields'] = ['img']\n        return results\n\n    def __repr__(self):\n        repr_str = (f'{self.__class__.__name__}('\n                    f'to_float32={self.to_float32}, '\n                    f\"color_type='{self.color_type}', \"\n                    f\"channel_order='{self.channel_order}', \"\n                    f'file_client_args={self.file_client_args})')\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass LoadImageFromWebcam(LoadImageFromFile):\n    \"\"\"Load an image from webcam.\n\n    Similar with :obj:`LoadImageFromFile`, but the image read from webcam is in\n    ``results['img']``.\n    \"\"\"\n\n    def __call__(self, results):\n        \"\"\"Call functions to add image meta information.\n\n    
    Args:\n            results (dict): Result dict with Webcam read image in\n                ``results['img']``.\n\n        Returns:\n            dict: The dict contains loaded image and meta information.\n        \"\"\"\n\n        img = results['img']\n        if self.to_float32:\n            img = img.astype(np.float32)\n\n        results['filename'] = None\n        results['ori_filename'] = None\n        results['img'] = img\n        results['img_shape'] = img.shape\n        results['ori_shape'] = img.shape\n        results['img_fields'] = ['img']\n        return results\n\n\n@PIPELINES.register_module()\nclass LoadMultiChannelImageFromFiles:\n    \"\"\"Load multi-channel images from a list of separate channel files.\n\n    Required keys are \"img_prefix\" and \"img_info\" (a dict that must contain the\n    key \"filename\", which is expected to be a list of filenames).\n    Added or updated keys are \"filename\", \"img\", \"img_shape\",\n    \"ori_shape\" (same as `img_shape`), \"pad_shape\" (same as `img_shape`),\n    \"scale_factor\" (1.0) and \"img_norm_cfg\" (means=0 and stds=1).\n\n    Args:\n        to_float32 (bool): Whether to convert the loaded image to a float32\n            numpy array. If set to False, the loaded image is an uint8 array.\n            Defaults to False.\n        color_type (str): The flag argument for :func:`mmcv.imfrombytes`.\n            Defaults to 'color'.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmcv.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 to_float32=False,\n                 color_type='unchanged',\n                 file_client_args=dict(backend='disk')):\n        self.to_float32 = to_float32\n        self.color_type = color_type\n        self.file_client_args = file_client_args.copy()\n        self.file_client = None\n\n    def __call__(self, results):\n        \"\"\"Call functions to load multiple images and get images meta\n        information.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded images and meta information.\n        \"\"\"\n\n        if self.file_client is None:\n            self.file_client = mmcv.FileClient(**self.file_client_args)\n\n        if results['img_prefix'] is not None:\n            filename = [\n                osp.join(results['img_prefix'], fname)\n                for fname in results['img_info']['filename']\n            ]\n        else:\n            filename = results['img_info']['filename']\n\n        img = []\n        for name in filename:\n            img_bytes = self.file_client.get(name)\n            img.append(mmcv.imfrombytes(img_bytes, flag=self.color_type))\n        img = np.stack(img, axis=-1)\n        if self.to_float32:\n            img = img.astype(np.float32)\n\n        results['filename'] = filename\n        results['ori_filename'] = results['img_info']['filename']\n        results['img'] = img\n        results['img_shape'] = img.shape\n        results['ori_shape'] = img.shape\n        # Set initial values for default meta_keys\n        results['pad_shape'] = img.shape\n        results['scale_factor'] = 1.0\n        num_channels = 1 if len(img.shape) < 3 else img.shape[2]\n        results['img_norm_cfg'] = dict(\n            mean=np.zeros(num_channels, dtype=np.float32),\n            std=np.ones(num_channels, dtype=np.float32),\n            
to_rgb=False)\n        return results\n\n    def __repr__(self):\n        repr_str = (f'{self.__class__.__name__}('\n                    f'to_float32={self.to_float32}, '\n                    f\"color_type='{self.color_type}', \"\n                    f'file_client_args={self.file_client_args})')\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass LoadAnnotations:\n    \"\"\"Load multiple types of annotations.\n\n    Args:\n        with_bbox (bool): Whether to parse and load the bbox annotation.\n             Default: True.\n        with_label (bool): Whether to parse and load the label annotation.\n            Default: True.\n        with_mask (bool): Whether to parse and load the mask annotation.\n             Default: False.\n        with_seg (bool): Whether to parse and load the semantic segmentation\n            annotation. Default: False.\n        poly2mask (bool): Whether to convert the instance masks from polygons\n            to bitmaps. Default: True.\n        denorm_bbox (bool): Whether to convert bbox from relative value to\n            absolute value. Only used in OpenImage Dataset.\n            Default: False.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmcv.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 with_bbox=True,\n                 with_label=True,\n                 with_mask=False,\n                 with_seg=False,\n                 poly2mask=True,\n                 denorm_bbox=False,\n                 file_client_args=dict(backend='disk')):\n        self.with_bbox = with_bbox\n        self.with_label = with_label\n        self.with_mask = with_mask\n        self.with_seg = with_seg\n        self.poly2mask = poly2mask\n        self.denorm_bbox = denorm_bbox\n        self.file_client_args = file_client_args.copy()\n        self.file_client = None\n\n    def _load_bboxes(self, results):\n        \"\"\"Private function to load bounding box annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded bounding box annotations.\n        \"\"\"\n\n        ann_info = results['ann_info']\n        results['gt_bboxes'] = ann_info['bboxes'].copy()\n\n        if self.denorm_bbox:\n            bbox_num = results['gt_bboxes'].shape[0]\n            if bbox_num != 0:\n                h, w = results['img_shape'][:2]\n                results['gt_bboxes'][:, 0::2] *= w\n                results['gt_bboxes'][:, 1::2] *= h\n\n        gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)\n        if gt_bboxes_ignore is not None:\n            results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()\n            results['bbox_fields'].append('gt_bboxes_ignore')\n        results['bbox_fields'].append('gt_bboxes')\n\n        gt_is_group_ofs = ann_info.get('gt_is_group_ofs', None)\n        if gt_is_group_ofs is not None:\n            results['gt_is_group_ofs'] = gt_is_group_ofs.copy()\n\n        return results\n\n    def _load_labels(self, results):\n        \"\"\"Private function to load label annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded label annotations.\n        \"\"\"\n\n        results['gt_labels'] = results['ann_info']['labels'].copy()\n        return results\n\n    def _poly2mask(self, mask_ann, img_h, 
img_w):\n        \"\"\"Private function to convert masks represented with polygons to\n        bitmaps.\n\n        Args:\n            mask_ann (list | dict): Polygon mask annotation input.\n            img_h (int): The height of output mask.\n            img_w (int): The width of output mask.\n\n        Returns:\n            numpy.ndarray: The decoded bitmap mask of shape (img_h, img_w).\n        \"\"\"\n\n        if isinstance(mask_ann, list):\n            # polygon -- a single object might consist of multiple parts\n            # we merge all parts into one mask rle code\n            rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n            rle = maskUtils.merge(rles)\n        elif isinstance(mask_ann['counts'], list):\n            # uncompressed RLE\n            rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n        else:\n            # rle\n            rle = mask_ann\n        mask = maskUtils.decode(rle)\n        return mask\n\n    def process_polygons(self, polygons):\n        \"\"\"Convert polygons to list of ndarray and filter invalid polygons.\n\n        Args:\n            polygons (list[list]): Polygons of one instance.\n\n        Returns:\n            list[numpy.ndarray]: Processed polygons.\n        \"\"\"\n\n        polygons = [np.array(p) for p in polygons]\n        valid_polygons = []\n        for polygon in polygons:\n            if len(polygon) % 2 == 0 and len(polygon) >= 6:\n                valid_polygons.append(polygon)\n        return valid_polygons\n\n    def _load_masks(self, results):\n        \"\"\"Private function to load mask annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded mask annotations.\n                If ``self.poly2mask`` is set ``True``, `gt_masks` will contain\n                :obj:`BitmapMasks`. 
Otherwise, :obj:`PolygonMasks` is used.\n        \"\"\"\n\n        h, w = results['img_info']['height'], results['img_info']['width']\n        gt_masks = results['ann_info']['masks']\n        if self.poly2mask:\n            gt_masks = BitmapMasks(\n                [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n        else:\n            gt_masks = PolygonMasks(\n                [self.process_polygons(polygons) for polygons in gt_masks], h,\n                w)\n        results['gt_masks'] = gt_masks\n        results['mask_fields'].append('gt_masks')\n        return results\n\n    def _load_semantic_seg(self, results):\n        \"\"\"Private function to load semantic segmentation annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`dataset`.\n\n        Returns:\n            dict: The dict contains loaded semantic segmentation annotations.\n        \"\"\"\n\n        if self.file_client is None:\n            self.file_client = mmcv.FileClient(**self.file_client_args)\n\n        filename = osp.join(results['seg_prefix'],\n                            results['ann_info']['seg_map'])\n        img_bytes = self.file_client.get(filename)\n        results['gt_semantic_seg'] = mmcv.imfrombytes(\n            img_bytes, flag='unchanged').squeeze()\n        results['seg_fields'].append('gt_semantic_seg')\n        return results\n\n    def __call__(self, results):\n        \"\"\"Call function to load multiple types of annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded bounding box, label, mask and\n                semantic segmentation annotations.\n        \"\"\"\n\n        if self.with_bbox:\n            results = self._load_bboxes(results)\n            if results is None:\n                return None\n        if self.with_label:\n            results = self._load_labels(results)\n        if self.with_mask:\n            results = self._load_masks(results)\n        if self.with_seg:\n            results = self._load_semantic_seg(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(with_bbox={self.with_bbox}, '\n        repr_str += f'with_label={self.with_label}, '\n        repr_str += f'with_mask={self.with_mask}, '\n        repr_str += f'with_seg={self.with_seg}, '\n        repr_str += f'poly2mask={self.poly2mask}, '\n        repr_str += f'file_client_args={self.file_client_args})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass LoadPanopticAnnotations(LoadAnnotations):\n    \"\"\"Load multiple types of panoptic annotations.\n\n    Args:\n        with_bbox (bool): Whether to parse and load the bbox annotation.\n             Default: True.\n        with_label (bool): Whether to parse and load the label annotation.\n            Default: True.\n        with_mask (bool): Whether to parse and load the mask annotation.\n             Default: True.\n        with_seg (bool): Whether to parse and load the semantic segmentation\n            annotation. 
Default: True.\n        file_client_args (dict): Arguments to instantiate a FileClient.\n            See :class:`mmcv.fileio.FileClient` for details.\n            Defaults to ``dict(backend='disk')``.\n    \"\"\"\n\n    def __init__(self,\n                 with_bbox=True,\n                 with_label=True,\n                 with_mask=True,\n                 with_seg=True,\n                 file_client_args=dict(backend='disk')):\n        if rgb2id is None:\n            raise RuntimeError(\n                'panopticapi is not installed, please install it by: '\n                'pip install git+https://github.com/cocodataset/'\n                'panopticapi.git.')\n\n        super(LoadPanopticAnnotations, self).__init__(\n            with_bbox=with_bbox,\n            with_label=with_label,\n            with_mask=with_mask,\n            with_seg=with_seg,\n            poly2mask=True,\n            denorm_bbox=False,\n            file_client_args=file_client_args)\n\n    def _load_masks_and_semantic_segs(self, results):\n        \"\"\"Private function to load mask and semantic segmentation annotations.\n\n        In gt_semantic_seg, the foreground label is from `0` to\n        `num_things - 1`, the background label is from `num_things` to\n        `num_things + num_stuff - 1`, 255 means the ignored label (`VOID`).\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded mask and semantic segmentation\n                annotations. `BitmapMasks` is used for mask annotations.\n        \"\"\"\n\n        if self.file_client is None:\n            self.file_client = mmcv.FileClient(**self.file_client_args)\n\n        filename = osp.join(results['seg_prefix'],\n                            results['ann_info']['seg_map'])\n        img_bytes = self.file_client.get(filename)\n        pan_png = mmcv.imfrombytes(\n            img_bytes, flag='color', channel_order='rgb').squeeze()\n        pan_png = rgb2id(pan_png)\n\n        gt_masks = []\n        gt_seg = np.zeros_like(pan_png) + 255  # 255 as ignore\n\n        for mask_info in results['ann_info']['masks']:\n            mask = (pan_png == mask_info['id'])\n            gt_seg = np.where(mask, mask_info['category'], gt_seg)\n\n            # The legal thing masks\n            if mask_info.get('is_thing'):\n                gt_masks.append(mask.astype(np.uint8))\n\n        if self.with_mask:\n            h, w = results['img_info']['height'], results['img_info']['width']\n            gt_masks = BitmapMasks(gt_masks, h, w)\n            results['gt_masks'] = gt_masks\n            results['mask_fields'].append('gt_masks')\n\n        if self.with_seg:\n            results['gt_semantic_seg'] = gt_seg\n            results['seg_fields'].append('gt_semantic_seg')\n        return results\n\n    def __call__(self, results):\n        \"\"\"Call function to load multiple types panoptic annotations.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded bounding box, label, mask and\n                semantic segmentation annotations.\n        \"\"\"\n\n        if self.with_bbox:\n            results = self._load_bboxes(results)\n            if results is None:\n                return None\n        if self.with_label:\n            results = self._load_labels(results)\n        if self.with_mask or self.with_seg:\n            # The tasks completed by '_load_masks' and 
'_load_semantic_segs'\n            # in LoadAnnotations are merged to one function.\n            results = self._load_masks_and_semantic_segs(results)\n\n        return results\n\n\n@PIPELINES.register_module()\nclass LoadProposals:\n    \"\"\"Load proposal pipeline.\n\n    Required key is \"proposals\". Updated keys are \"proposals\", \"bbox_fields\".\n\n    Args:\n        num_max_proposals (int, optional): Maximum number of proposals to load.\n            If not specified, all proposals will be loaded.\n    \"\"\"\n\n    def __init__(self, num_max_proposals=None):\n        self.num_max_proposals = num_max_proposals\n\n    def __call__(self, results):\n        \"\"\"Call function to load proposals from file.\n\n        Args:\n            results (dict): Result dict from :obj:`mmdet.CustomDataset`.\n\n        Returns:\n            dict: The dict contains loaded proposal annotations.\n        \"\"\"\n\n        proposals = results['proposals']\n        if proposals.shape[1] not in (4, 5):\n            raise AssertionError(\n                'proposals should have shapes (n, 4) or (n, 5), '\n                f'but found {proposals.shape}')\n        proposals = proposals[:, :4]\n\n        if self.num_max_proposals is not None:\n            proposals = proposals[:self.num_max_proposals]\n\n        if len(proposals) == 0:\n            proposals = np.array([[0, 0, 0, 0]], dtype=np.float32)\n        results['proposals'] = proposals\n        results['bbox_fields'].append('proposals')\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n            f'(num_max_proposals={self.num_max_proposals})'\n\n\n@PIPELINES.register_module()\nclass FilterAnnotations:\n    \"\"\"Filter invalid annotations.\n\n    Args:\n        min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n            boxes. Default: (1., 1.)\n        min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n            Default: 1\n        by_box (bool): Filter instances with bounding boxes not meeting the\n            min_gt_bbox_wh threshold. Default: True\n        by_mask (bool): Filter instances with masks not meeting\n            min_gt_mask_area threshold. Default: False\n        keep_empty (bool): Whether to return None when it\n            becomes an empty bbox after filtering. 
Default: True\n    \"\"\"\n\n    def __init__(self,\n                 min_gt_bbox_wh=(1., 1.),\n                 min_gt_mask_area=1,\n                 by_box=True,\n                 by_mask=False,\n                 keep_empty=True):\n        # TODO: add more filter options\n        assert by_box or by_mask\n        self.min_gt_bbox_wh = min_gt_bbox_wh\n        self.min_gt_mask_area = min_gt_mask_area\n        self.by_box = by_box\n        self.by_mask = by_mask\n        self.keep_empty = keep_empty\n\n    def __call__(self, results):\n        if self.by_box:\n            assert 'gt_bboxes' in results\n            gt_bboxes = results['gt_bboxes']\n            instance_num = gt_bboxes.shape[0]\n        if self.by_mask:\n            assert 'gt_masks' in results\n            gt_masks = results['gt_masks']\n            instance_num = len(gt_masks)\n\n        if instance_num == 0:\n            return results\n\n        tests = []\n        if self.by_box:\n            w = gt_bboxes[:, 2] - gt_bboxes[:, 0]\n            h = gt_bboxes[:, 3] - gt_bboxes[:, 1]\n            tests.append((w > self.min_gt_bbox_wh[0])\n                         & (h > self.min_gt_bbox_wh[1]))\n        if self.by_mask:\n            gt_masks = results['gt_masks']\n            tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n        keep = tests[0]\n        for t in tests[1:]:\n            keep = keep & t\n\n        keep = keep.nonzero()[0]\n\n        keys = ('gt_bboxes', 'gt_labels', 'gt_masks')\n        for key in keys:\n            if key in results:\n                results[key] = results[key][keep]\n        if keep.size == 0:\n            if self.keep_empty:\n                return None\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + \\\n            f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n            f'min_gt_mask_area={self.min_gt_mask_area}, ' \\\n            f'by_box={self.by_box}, ' \\\n            f'by_mask={self.by_mask}, ' \\\n            f'keep_empty={self.keep_empty})'\n"
  },
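A small usage sketch for the loaders above, assuming mmdet and mmcv are installed; the image path is a placeholder and the ``ann_info`` layout mirrors what :obj:`mmdet.CustomDataset` would normally provide before the pipeline runs.

import numpy as np
from mmdet.datasets.pipelines import LoadAnnotations, LoadImageFromFile

# Keys mirror what CustomDataset.pre_pipeline prepares for the pipeline.
results = dict(
    img_prefix='data/demo/',                # hypothetical image directory
    img_info=dict(filename='000001.jpg'),   # hypothetical file name
    ann_info=dict(
        bboxes=np.array([[10., 10., 50., 60.]], dtype=np.float32),
        labels=np.array([0], dtype=np.int64)),
    bbox_fields=[], mask_fields=[], seg_fields=[])

results = LoadImageFromFile()(results)
results = LoadAnnotations(with_bbox=True, with_label=True)(results)
# results now contains 'img', 'img_shape', 'gt_bboxes' and 'gt_labels',
# and 'gt_bboxes' has been appended to results['bbox_fields'].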
  {
    "path": "mmdet/datasets/pipelines/test_time_aug.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\n\nfrom ..builder import PIPELINES\nfrom .compose import Compose\n\n\n@PIPELINES.register_module()\nclass MultiScaleFlipAug:\n    \"\"\"Test-time augmentation with multiple scales and flipping.\n\n    An example configuration is as followed:\n\n    .. code-block::\n\n        img_scale=[(1333, 400), (1333, 800)],\n        flip=True,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=32),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ]\n\n    After MultiScaleFLipAug with above configuration, the results are wrapped\n    into lists of the same length as followed:\n\n    .. code-block::\n\n        dict(\n            img=[...],\n            img_shape=[...],\n            scale=[(1333, 400), (1333, 400), (1333, 800), (1333, 800)]\n            flip=[False, True, False, True]\n            ...\n        )\n\n    Args:\n        transforms (list[dict]): Transforms to apply in each augmentation.\n        img_scale (tuple | list[tuple] | None): Images scales for resizing.\n        scale_factor (float | list[float] | None): Scale factors for resizing.\n        flip (bool): Whether apply flip augmentation. Default: False.\n        flip_direction (str | list[str]): Flip augmentation directions,\n            options are \"horizontal\", \"vertical\" and \"diagonal\". If\n            flip_direction is a list, multiple flip augmentations will be\n            applied. It has no effect when flip == False. Default:\n            \"horizontal\".\n    \"\"\"\n\n    def __init__(self,\n                 transforms,\n                 img_scale=None,\n                 scale_factor=None,\n                 flip=False,\n                 flip_direction='horizontal'):\n        self.transforms = Compose(transforms)\n        assert (img_scale is None) ^ (scale_factor is None), (\n            'Must have but only one variable can be set')\n        if img_scale is not None:\n            self.img_scale = img_scale if isinstance(img_scale,\n                                                     list) else [img_scale]\n            self.scale_key = 'scale'\n            assert mmcv.is_list_of(self.img_scale, tuple)\n        else:\n            self.img_scale = scale_factor if isinstance(\n                scale_factor, list) else [scale_factor]\n            self.scale_key = 'scale_factor'\n\n        self.flip = flip\n        self.flip_direction = flip_direction if isinstance(\n            flip_direction, list) else [flip_direction]\n        assert mmcv.is_list_of(self.flip_direction, str)\n        if not self.flip and self.flip_direction != ['horizontal']:\n            warnings.warn(\n                'flip_direction has no effect when flip is set to False')\n        if (self.flip\n                and not any([t['type'] == 'RandomFlip' for t in transforms])):\n            warnings.warn(\n                'flip has no effect when RandomFlip is not in transforms')\n\n    def __call__(self, results):\n        \"\"\"Call function to apply test time augment transforms on results.\n\n        Args:\n            results (dict): Result dict contains the data to transform.\n\n        Returns:\n           dict[str: list]: The augmented data, where each value is wrapped\n               into a list.\n        \"\"\"\n\n        aug_data = []\n 
       flip_args = [(False, None)]\n        if self.flip:\n            flip_args += [(True, direction)\n                          for direction in self.flip_direction]\n        for scale in self.img_scale:\n            for flip, direction in flip_args:\n                _results = results.copy()\n                _results[self.scale_key] = scale\n                _results['flip'] = flip\n                _results['flip_direction'] = direction\n                data = self.transforms(_results)\n                aug_data.append(data)\n        # list of dict to dict of list\n        aug_data_dict = {key: [] for key in aug_data[0]}\n        for data in aug_data:\n            for key, val in data.items():\n                aug_data_dict[key].append(val)\n        return aug_data_dict\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(transforms={self.transforms}, '\n        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '\n        repr_str += f'flip_direction={self.flip_direction})'\n        return repr_str\n"
  },
  {
    "path": "mmdet/datasets/pipelines/transforms.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport inspect\nimport math\nimport warnings\n\nimport cv2\nimport mmcv\nimport numpy as np\nfrom numpy import random\n\nfrom mmdet.core import BitmapMasks, PolygonMasks, find_inside_bboxes\nfrom mmdet.core.evaluation.bbox_overlaps import bbox_overlaps\nfrom mmdet.utils import log_img_scale\nfrom ..builder import PIPELINES\n\ntry:\n    from imagecorruptions import corrupt\nexcept ImportError:\n    corrupt = None\n\ntry:\n    import albumentations\n    from albumentations import Compose\nexcept ImportError:\n    albumentations = None\n    Compose = None\n\n\n@PIPELINES.register_module()\nclass Resize:\n    \"\"\"Resize images & bbox & mask.\n\n    This transform resizes the input image to some scale. Bboxes and masks are\n    then resized with the same scale factor. If the input dict contains the key\n    \"scale\", then the scale in the input dict is used, otherwise the specified\n    scale in the init method is used. If the input dict contains the key\n    \"scale_factor\" (if MultiScaleFlipAug does not give img_scale but\n    scale_factor), the actual scale will be computed by image shape and\n    scale_factor.\n\n    `img_scale` can either be a tuple (single-scale) or a list of tuple\n    (multi-scale). There are 3 multiscale modes:\n\n    - ``ratio_range is not None``: randomly sample a ratio from the ratio \\\n      range and multiply it with the image scale.\n    - ``ratio_range is None`` and ``multiscale_mode == \"range\"``: randomly \\\n      sample a scale from the multiscale range.\n    - ``ratio_range is None`` and ``multiscale_mode == \"value\"``: randomly \\\n      sample a scale from multiple scales.\n\n    Args:\n        img_scale (tuple or list[tuple]): Images scales for resizing.\n        multiscale_mode (str): Either \"range\" or \"value\".\n        ratio_range (tuple[float]): (min_ratio, max_ratio)\n        keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n            image.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n            These two backends generates slightly different results. Defaults\n            to 'cv2'.\n        interpolation (str): Interpolation method, accepted values are\n            \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n            backend, \"nearest\", \"bilinear\" for 'pillow' backend.\n        override (bool, optional): Whether to override `scale` and\n            `scale_factor` so as to call resize twice. Default False. 
If True,\n            after the first resizing, the existing `scale` and `scale_factor`\n            will be ignored so that a second resizing can be performed.\n            This option is a work-around for multiple times of resize in DETR.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale=None,\n                 multiscale_mode='range',\n                 ratio_range=None,\n                 keep_ratio=True,\n                 bbox_clip_border=True,\n                 backend='cv2',\n                 interpolation='bilinear',\n                 override=False):\n        if img_scale is None:\n            self.img_scale = None\n        else:\n            if isinstance(img_scale, list):\n                self.img_scale = img_scale\n            else:\n                self.img_scale = [img_scale]\n            assert mmcv.is_list_of(self.img_scale, tuple)\n\n        if ratio_range is not None:\n            # mode 1: given a scale and a range of image ratio\n            assert len(self.img_scale) == 1\n        else:\n            # mode 2: given multiple scales or a range of scales\n            assert multiscale_mode in ['value', 'range']\n\n        self.backend = backend\n        self.multiscale_mode = multiscale_mode\n        self.ratio_range = ratio_range\n        self.keep_ratio = keep_ratio\n        # TODO: refactor the override option in Resize\n        self.interpolation = interpolation\n        self.override = override\n        self.bbox_clip_border = bbox_clip_border\n\n    @staticmethod\n    def random_select(img_scales):\n        \"\"\"Randomly select an img_scale from given candidates.\n\n        Args:\n            img_scales (list[tuple]): Image scales for selection.\n\n        Returns:\n            (tuple, int): Returns a tuple ``(img_scale, scale_idx)``, \\\n                where ``img_scale`` is the selected image scale and \\\n                ``scale_idx`` is the selected index in the given candidates.\n        \"\"\"\n\n        assert mmcv.is_list_of(img_scales, tuple)\n        scale_idx = np.random.randint(len(img_scales))\n        img_scale = img_scales[scale_idx]\n        return img_scale, scale_idx\n\n    @staticmethod\n    def random_sample(img_scales):\n        \"\"\"Randomly sample an img_scale when ``multiscale_mode=='range'``.\n\n        Args:\n            img_scales (list[tuple]): Image scale range for sampling.\n                There must be two tuples in img_scales, which specify the lower\n                and upper bound of image scales.\n\n        Returns:\n            (tuple, None): Returns a tuple ``(img_scale, None)``, where \\\n                ``img_scale`` is sampled scale and None is just a placeholder \\\n                to be consistent with :func:`random_select`.\n        \"\"\"\n\n        assert mmcv.is_list_of(img_scales, tuple) and len(img_scales) == 2\n        img_scale_long = [max(s) for s in img_scales]\n        img_scale_short = [min(s) for s in img_scales]\n        long_edge = np.random.randint(\n            min(img_scale_long),\n            max(img_scale_long) + 1)\n        short_edge = np.random.randint(\n            min(img_scale_short),\n            max(img_scale_short) + 1)\n        img_scale = (long_edge, short_edge)\n        return img_scale, None\n\n    @staticmethod\n    def random_sample_ratio(img_scale, ratio_range):\n        \"\"\"Randomly sample an img_scale when ``ratio_range`` is specified.\n\n        A ratio will be randomly sampled from the range specified by\n        ``ratio_range``. 
Then it would be multiplied with ``img_scale`` to\n        generate sampled scale.\n\n        Args:\n            img_scale (tuple): Images scale base to multiply with ratio.\n            ratio_range (tuple[float]): The minimum and maximum ratio to scale\n                the ``img_scale``.\n\n        Returns:\n            (tuple, None): Returns a tuple ``(scale, None)``, where \\\n                ``scale`` is sampled ratio multiplied with ``img_scale`` and \\\n                None is just a placeholder to be consistent with \\\n                :func:`random_select`.\n        \"\"\"\n\n        assert isinstance(img_scale, tuple) and len(img_scale) == 2\n        min_ratio, max_ratio = ratio_range\n        assert min_ratio <= max_ratio\n        ratio = np.random.random_sample() * (max_ratio - min_ratio) + min_ratio\n        scale = int(img_scale[0] * ratio), int(img_scale[1] * ratio)\n        return scale, None\n\n    def _random_scale(self, results):\n        \"\"\"Randomly sample an img_scale according to ``ratio_range`` and\n        ``multiscale_mode``.\n\n        If ``ratio_range`` is specified, a ratio will be sampled and be\n        multiplied with ``img_scale``.\n        If multiple scales are specified by ``img_scale``, a scale will be\n        sampled according to ``multiscale_mode``.\n        Otherwise, single scale will be used.\n\n        Args:\n            results (dict): Result dict from :obj:`dataset`.\n\n        Returns:\n            dict: Two new keys 'scale` and 'scale_idx` are added into \\\n                ``results``, which would be used by subsequent pipelines.\n        \"\"\"\n\n        if self.ratio_range is not None:\n            scale, scale_idx = self.random_sample_ratio(\n                self.img_scale[0], self.ratio_range)\n        elif len(self.img_scale) == 1:\n            scale, scale_idx = self.img_scale[0], 0\n        elif self.multiscale_mode == 'range':\n            scale, scale_idx = self.random_sample(self.img_scale)\n        elif self.multiscale_mode == 'value':\n            scale, scale_idx = self.random_select(self.img_scale)\n        else:\n            raise NotImplementedError\n\n        results['scale'] = scale\n        results['scale_idx'] = scale_idx\n\n    def _resize_img(self, results):\n        \"\"\"Resize images with ``results['scale']``.\"\"\"\n        for key in results.get('img_fields', ['img']):\n            if self.keep_ratio:\n                img, scale_factor = mmcv.imrescale(\n                    results[key],\n                    results['scale'],\n                    return_scale=True,\n                    interpolation=self.interpolation,\n                    backend=self.backend)\n                # the w_scale and h_scale has minor difference\n                # a real fix should be done in the mmcv.imrescale in the future\n                new_h, new_w = img.shape[:2]\n                h, w = results[key].shape[:2]\n                w_scale = new_w / w\n                h_scale = new_h / h\n            else:\n                img, w_scale, h_scale = mmcv.imresize(\n                    results[key],\n                    results['scale'],\n                    return_scale=True,\n                    interpolation=self.interpolation,\n                    backend=self.backend)\n            results[key] = img\n\n            scale_factor = np.array([w_scale, h_scale, w_scale, h_scale],\n                                    dtype=np.float32)\n            results['img_shape'] = img.shape\n            # in case that there is no padding\n           
 results['pad_shape'] = img.shape\n            results['scale_factor'] = scale_factor\n            results['keep_ratio'] = self.keep_ratio\n\n    def _resize_bboxes(self, results):\n        \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n        for key in results.get('bbox_fields', []):\n            bboxes = results[key] * results['scale_factor']\n            if self.bbox_clip_border:\n                img_shape = results['img_shape']\n                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n            results[key] = bboxes\n\n    def _resize_masks(self, results):\n        \"\"\"Resize masks with ``results['scale']``\"\"\"\n        for key in results.get('mask_fields', []):\n            if results[key] is None:\n                continue\n            if self.keep_ratio:\n                results[key] = results[key].rescale(results['scale'])\n            else:\n                results[key] = results[key].resize(results['img_shape'][:2])\n\n    def _resize_seg(self, results):\n        \"\"\"Resize semantic segmentation map with ``results['scale']``.\"\"\"\n        for key in results.get('seg_fields', []):\n            if self.keep_ratio:\n                gt_seg = mmcv.imrescale(\n                    results[key],\n                    results['scale'],\n                    interpolation='nearest',\n                    backend=self.backend)\n            else:\n                gt_seg = mmcv.imresize(\n                    results[key],\n                    results['scale'],\n                    interpolation='nearest',\n                    backend=self.backend)\n            results[key] = gt_seg\n\n    def __call__(self, results):\n        \"\"\"Call function to resize images, bounding boxes, masks, semantic\n        segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Resized results, 'img_shape', 'pad_shape', 'scale_factor', \\\n                'keep_ratio' keys are added into result dict.\n        \"\"\"\n\n        if 'scale' not in results:\n            if 'scale_factor' in results:\n                img_shape = results['img'].shape[:2]\n                scale_factor = results['scale_factor']\n                assert isinstance(scale_factor, float)\n                results['scale'] = tuple(\n                    [int(x * scale_factor) for x in img_shape][::-1])\n            else:\n                self._random_scale(results)\n        else:\n            if not self.override:\n                assert 'scale_factor' not in results, (\n                    'scale and scale_factor cannot be both set.')\n            else:\n                results.pop('scale')\n                if 'scale_factor' in results:\n                    results.pop('scale_factor')\n                self._random_scale(results)\n\n        self._resize_img(results)\n        self._resize_bboxes(results)\n        self._resize_masks(results)\n        self._resize_seg(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(img_scale={self.img_scale}, '\n        repr_str += f'multiscale_mode={self.multiscale_mode}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'keep_ratio={self.keep_ratio}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass RandomFlip:\n   
 \"\"\"Flip the image & bbox & mask.\n\n    If the input dict contains the key \"flip\", then the flag will be used,\n    otherwise it will be randomly decided by a ratio specified in the init\n    method.\n\n    When random flip is enabled, ``flip_ratio``/``direction`` can either be a\n    float/string or tuple of float/string. There are 3 flip modes:\n\n    - ``flip_ratio`` is float, ``direction`` is string: the image will be\n        ``direction``ly flipped with probability of ``flip_ratio`` .\n        E.g., ``flip_ratio=0.5``, ``direction='horizontal'``,\n        then image will be horizontally flipped with probability of 0.5.\n    - ``flip_ratio`` is float, ``direction`` is list of string: the image will\n        be ``direction[i]``ly flipped with probability of\n        ``flip_ratio/len(direction)``.\n        E.g., ``flip_ratio=0.5``, ``direction=['horizontal', 'vertical']``,\n        then image will be horizontally flipped with probability of 0.25,\n        vertically with probability of 0.25.\n    - ``flip_ratio`` is list of float, ``direction`` is list of string:\n        given ``len(flip_ratio) == len(direction)``, the image will\n        be ``direction[i]``ly flipped with probability of ``flip_ratio[i]``.\n        E.g., ``flip_ratio=[0.3, 0.5]``, ``direction=['horizontal',\n        'vertical']``, then image will be horizontally flipped with probability\n        of 0.3, vertically with probability of 0.5.\n\n    Args:\n        flip_ratio (float | list[float], optional): The flipping probability.\n            Default: None.\n        direction(str | list[str], optional): The flipping direction. Options\n            are 'horizontal', 'vertical', 'diagonal'. Default: 'horizontal'.\n            If input is a list, the length must equal ``flip_ratio``. Each\n            element in ``flip_ratio`` indicates the flip probability of\n            corresponding direction.\n    \"\"\"\n\n    def __init__(self, flip_ratio=None, direction='horizontal'):\n        if isinstance(flip_ratio, list):\n            assert mmcv.is_list_of(flip_ratio, float)\n            assert 0 <= sum(flip_ratio) <= 1\n        elif isinstance(flip_ratio, float):\n            assert 0 <= flip_ratio <= 1\n        elif flip_ratio is None:\n            pass\n        else:\n            raise ValueError('flip_ratios must be None, float, '\n                             'or list of float')\n        self.flip_ratio = flip_ratio\n\n        valid_directions = ['horizontal', 'vertical', 'diagonal']\n        if isinstance(direction, str):\n            assert direction in valid_directions\n        elif isinstance(direction, list):\n            assert mmcv.is_list_of(direction, str)\n            assert set(direction).issubset(set(valid_directions))\n        else:\n            raise ValueError('direction must be either str or list of str')\n        self.direction = direction\n\n        if isinstance(flip_ratio, list):\n            assert len(self.flip_ratio) == len(self.direction)\n\n    def bbox_flip(self, bboxes, img_shape, direction):\n        \"\"\"Flip bboxes horizontally.\n\n        Args:\n            bboxes (numpy.ndarray): Bounding boxes, shape (..., 4*k)\n            img_shape (tuple[int]): Image shape (height, width)\n            direction (str): Flip direction. 
Options are 'horizontal',\n                'vertical'.\n\n        Returns:\n            numpy.ndarray: Flipped bounding boxes.\n        \"\"\"\n\n        assert bboxes.shape[-1] % 4 == 0\n        flipped = bboxes.copy()\n        if direction == 'horizontal':\n            w = img_shape[1]\n            flipped[..., 0::4] = w - bboxes[..., 2::4]\n            flipped[..., 2::4] = w - bboxes[..., 0::4]\n        elif direction == 'vertical':\n            h = img_shape[0]\n            flipped[..., 1::4] = h - bboxes[..., 3::4]\n            flipped[..., 3::4] = h - bboxes[..., 1::4]\n        elif direction == 'diagonal':\n            w = img_shape[1]\n            h = img_shape[0]\n            flipped[..., 0::4] = w - bboxes[..., 2::4]\n            flipped[..., 1::4] = h - bboxes[..., 3::4]\n            flipped[..., 2::4] = w - bboxes[..., 0::4]\n            flipped[..., 3::4] = h - bboxes[..., 1::4]\n        else:\n            raise ValueError(f\"Invalid flipping direction '{direction}'\")\n        return flipped\n\n    def __call__(self, results):\n        \"\"\"Call function to flip bounding boxes, masks, semantic segmentation\n        maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Flipped results, 'flip', 'flip_direction' keys are added \\\n                into result dict.\n        \"\"\"\n\n        if 'flip' not in results:\n            if isinstance(self.direction, list):\n                # None means non-flip\n                direction_list = self.direction + [None]\n            else:\n                # None means non-flip\n                direction_list = [self.direction, None]\n\n            if isinstance(self.flip_ratio, list):\n                non_flip_ratio = 1 - sum(self.flip_ratio)\n                flip_ratio_list = self.flip_ratio + [non_flip_ratio]\n            else:\n                non_flip_ratio = 1 - self.flip_ratio\n                # exclude non-flip\n                single_ratio = self.flip_ratio / (len(direction_list) - 1)\n                flip_ratio_list = [single_ratio] * (len(direction_list) -\n                                                    1) + [non_flip_ratio]\n\n            cur_dir = np.random.choice(direction_list, p=flip_ratio_list)\n\n            results['flip'] = cur_dir is not None\n        if 'flip_direction' not in results:\n            results['flip_direction'] = cur_dir\n        if results['flip']:\n            # flip image\n            for key in results.get('img_fields', ['img']):\n                results[key] = mmcv.imflip(\n                    results[key], direction=results['flip_direction'])\n            # flip bboxes\n            for key in results.get('bbox_fields', []):\n                results[key] = self.bbox_flip(results[key],\n                                              results['img_shape'],\n                                              results['flip_direction'])\n            # flip masks\n            for key in results.get('mask_fields', []):\n                results[key] = results[key].flip(results['flip_direction'])\n\n            # flip segs\n            for key in results.get('seg_fields', []):\n                results[key] = mmcv.imflip(\n                    results[key], direction=results['flip_direction'])\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(flip_ratio={self.flip_ratio})'\n\n\n@PIPELINES.register_module()\nclass RandomShift:\n    \"\"\"Shift the image and box given shift pixels and 
probability.\n\n    Args:\n        shift_ratio (float): Probability of shifts. Default 0.5.\n        max_shift_px (int): The max pixels for shifting. Default 32.\n        filter_thr_px (int): The width and height threshold for filtering.\n            The bbox and the rest of the targets below the width and\n            height threshold will be filtered. Default 1.\n    \"\"\"\n\n    def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1):\n        assert 0 <= shift_ratio <= 1\n        assert max_shift_px >= 0\n        self.shift_ratio = shift_ratio\n        self.max_shift_px = max_shift_px\n        self.filter_thr_px = int(filter_thr_px)\n        # The key correspondence from bboxes to labels.\n        self.bbox2label = {\n            'gt_bboxes': 'gt_labels',\n            'gt_bboxes_ignore': 'gt_labels_ignore'\n        }\n\n    def __call__(self, results):\n        \"\"\"Call function to random shift images, bounding boxes.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Shift results.\n        \"\"\"\n        if random.random() < self.shift_ratio:\n            img_shape = results['img'].shape[:2]\n\n            random_shift_x = random.randint(-self.max_shift_px,\n                                            self.max_shift_px)\n            random_shift_y = random.randint(-self.max_shift_px,\n                                            self.max_shift_px)\n            new_x = max(0, random_shift_x)\n            ori_x = max(0, -random_shift_x)\n            new_y = max(0, random_shift_y)\n            ori_y = max(0, -random_shift_y)\n\n            # TODO: support mask and semantic segmentation maps.\n            for key in results.get('bbox_fields', []):\n                bboxes = results[key].copy()\n                bboxes[..., 0::2] += random_shift_x\n                bboxes[..., 1::2] += random_shift_y\n\n                # clip border\n                bboxes[..., 0::2] = np.clip(bboxes[..., 0::2], 0, img_shape[1])\n                bboxes[..., 1::2] = np.clip(bboxes[..., 1::2], 0, img_shape[0])\n\n                # remove invalid bboxes\n                bbox_w = bboxes[..., 2] - bboxes[..., 0]\n                bbox_h = bboxes[..., 3] - bboxes[..., 1]\n                valid_inds = (bbox_w > self.filter_thr_px) & (\n                    bbox_h > self.filter_thr_px)\n                # If the shift does not contain any gt-bbox area, skip this\n                # image.\n                if key == 'gt_bboxes' and not valid_inds.any():\n                    return results\n                bboxes = bboxes[valid_inds]\n                results[key] = bboxes\n\n                # label fields. e.g. 
gt_labels and gt_labels_ignore\n                label_key = self.bbox2label.get(key)\n                if label_key in results:\n                    results[label_key] = results[label_key][valid_inds]\n\n            for key in results.get('img_fields', ['img']):\n                img = results[key]\n                new_img = np.zeros_like(img)\n                img_h, img_w = img.shape[:2]\n                new_h = img_h - np.abs(random_shift_y)\n                new_w = img_w - np.abs(random_shift_x)\n                new_img[new_y:new_y + new_h, new_x:new_x + new_w] \\\n                    = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w]\n                results[key] = new_img\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(shift_ratio={self.shift_ratio}, '\n        repr_str += f'max_shift_px={self.max_shift_px}, '\n        repr_str += f'filter_thr_px={self.filter_thr_px})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Pad:\n    \"\"\"Pad the image & masks & segmentation map.\n\n    There are two padding modes: (1) pad to a fixed size and (2) pad to the\n    minimum size that is divisible by some number.\n    Added keys are \"pad_shape\", \"pad_fixed_size\", \"pad_size_divisor\".\n\n    Args:\n        size (tuple, optional): Fixed padding size.\n        size_divisor (int, optional): The divisor of padded size.\n        pad_to_square (bool): Whether to pad the image into a square.\n            Currently only used for YOLOX. Default: False.\n        pad_val (dict, optional): A dict for padding value, the default\n            value is `dict(img=0, masks=0, seg=255)`.\n    \"\"\"\n\n    def __init__(self,\n                 size=None,\n                 size_divisor=None,\n                 pad_to_square=False,\n                 pad_val=dict(img=0, masks=0, seg=255)):\n        self.size = size\n        self.size_divisor = size_divisor\n        if isinstance(pad_val, float) or isinstance(pad_val, int):\n            warnings.warn(\n                'pad_val of float type is deprecated now, '\n                f'please use pad_val=dict(img={pad_val}, '\n                f'masks={pad_val}, seg=255) instead.', DeprecationWarning)\n            pad_val = dict(img=pad_val, masks=pad_val, seg=255)\n        assert isinstance(pad_val, dict)\n        self.pad_val = pad_val\n        self.pad_to_square = pad_to_square\n\n        if pad_to_square:\n            assert size is None and size_divisor is None, \\\n                'The size and size_divisor must be None ' \\\n                'when pad_to_square is True'\n        else:\n            assert size is not None or size_divisor is not None, \\\n                'only one of size and size_divisor should be valid'\n            assert size is None or size_divisor is None\n\n    def _pad_img(self, results):\n        \"\"\"Pad images according to ``self.size``.\"\"\"\n        pad_val = self.pad_val.get('img', 0)\n        for key in results.get('img_fields', ['img']):\n            if self.pad_to_square:\n                max_size = max(results[key].shape[:2])\n                self.size = (max_size, max_size)\n            if self.size is not None:\n                padded_img = mmcv.impad(\n                    results[key], shape=self.size, pad_val=pad_val)\n            elif self.size_divisor is not None:\n                padded_img = mmcv.impad_to_multiple(\n                    results[key], self.size_divisor, pad_val=pad_val)\n            results[key] = padded_img\n        results['pad_shape'] = padded_img.shape\n        results['pad_fixed_size'] = self.size\n        results['pad_size_divisor'] = 
self.size_divisor\n\n    def _pad_masks(self, results):\n        \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n        pad_shape = results['pad_shape'][:2]\n        pad_val = self.pad_val.get('masks', 0)\n        for key in results.get('mask_fields', []):\n            results[key] = results[key].pad(pad_shape, pad_val=pad_val)\n\n    def _pad_seg(self, results):\n        \"\"\"Pad semantic segmentation map according to\n        ``results['pad_shape']``.\"\"\"\n        pad_val = self.pad_val.get('seg', 255)\n        for key in results.get('seg_fields', []):\n            results[key] = mmcv.impad(\n                results[key], shape=results['pad_shape'][:2], pad_val=pad_val)\n\n    def __call__(self, results):\n        \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        self._pad_img(results)\n        self._pad_masks(results)\n        self._pad_seg(results)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(size={self.size}, '\n        repr_str += f'size_divisor={self.size_divisor}, '\n        repr_str += f'pad_to_square={self.pad_to_square}, '\n        repr_str += f'pad_val={self.pad_val})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Normalize:\n    \"\"\"Normalize the image.\n\n    Added key is \"img_norm_cfg\".\n\n    Args:\n        mean (sequence): Mean values of 3 channels.\n        std (sequence): Std values of 3 channels.\n        to_rgb (bool): Whether to convert the image from BGR to RGB,\n            default is true.\n    \"\"\"\n\n    def __init__(self, mean, std, to_rgb=True):\n        self.mean = np.array(mean, dtype=np.float32)\n        self.std = np.array(std, dtype=np.float32)\n        self.to_rgb = to_rgb\n\n    def __call__(self, results):\n        \"\"\"Call function to normalize images.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Normalized results, 'img_norm_cfg' key is added into\n                result dict.\n        \"\"\"\n        for key in results.get('img_fields', ['img']):\n            results[key] = mmcv.imnormalize(results[key], self.mean, self.std,\n                                            self.to_rgb)\n        results['img_norm_cfg'] = dict(\n            mean=self.mean, std=self.std, to_rgb=self.to_rgb)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(mean={self.mean}, std={self.std}, to_rgb={self.to_rgb})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass RandomCrop:\n    \"\"\"Random crop the image & bboxes & masks.\n\n    The absolute `crop_size` is sampled based on `crop_type` and `image_size`,\n    then the cropped results are generated.\n\n    Args:\n        crop_size (tuple): The relative ratio or absolute pixels of\n            height and width.\n        crop_type (str, optional): one of \"relative_range\", \"relative\",\n            \"absolute\", \"absolute_range\". \"relative\" randomly crops\n            (h * crop_size[0], w * crop_size[1]) part from an input of size\n            (h, w). \"relative_range\" uniformly samples relative crop size from\n            range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n            respectively. 
\"absolute\" crops from an input with absolute size\n            (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n            crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n            in range [crop_size[0], min(w, crop_size[1])]. Default \"absolute\".\n        allow_negative_crop (bool, optional): Whether to allow a crop that does\n            not contain any bbox area. Default False.\n        recompute_bbox (bool, optional): Whether to re-compute the boxes based\n            on cropped instance masks. Default False.\n        bbox_clip_border (bool, optional): Whether clip the objects outside\n            the border of the image. Defaults to True.\n\n    Note:\n        - If the image is smaller than the absolute crop size, return the\n            original image.\n        - The keys for bboxes, labels and masks must be aligned. That is,\n          `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and\n          `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and\n          `gt_masks_ignore`.\n        - If the crop does not contain any gt-bbox region and\n          `allow_negative_crop` is set to False, skip this image.\n    \"\"\"\n\n    def __init__(self,\n                 crop_size,\n                 crop_type='absolute',\n                 allow_negative_crop=False,\n                 recompute_bbox=False,\n                 bbox_clip_border=True):\n        if crop_type not in [\n                'relative_range', 'relative', 'absolute', 'absolute_range'\n        ]:\n            raise ValueError(f'Invalid crop_type {crop_type}.')\n        if crop_type in ['absolute', 'absolute_range']:\n            assert crop_size[0] > 0 and crop_size[1] > 0\n            assert isinstance(crop_size[0], int) and isinstance(\n                crop_size[1], int)\n        else:\n            assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n        self.crop_size = crop_size\n        self.crop_type = crop_type\n        self.allow_negative_crop = allow_negative_crop\n        self.bbox_clip_border = bbox_clip_border\n        self.recompute_bbox = recompute_bbox\n        # The key correspondence from bboxes to labels and masks.\n        self.bbox2label = {\n            'gt_bboxes': 'gt_labels',\n            'gt_bboxes_ignore': 'gt_labels_ignore'\n        }\n        self.bbox2mask = {\n            'gt_bboxes': 'gt_masks',\n            'gt_bboxes_ignore': 'gt_masks_ignore'\n        }\n\n    def _crop_data(self, results, crop_size, allow_negative_crop):\n        \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n        segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n            crop_size (tuple): Expected absolute size after cropping, (h, w).\n            allow_negative_crop (bool): Whether to allow a crop that does not\n                contain any bbox area. 
Default to False.\n\n        Returns:\n            dict: Randomly cropped results, 'img_shape' key in result dict is\n                updated according to crop size.\n        \"\"\"\n        assert crop_size[0] > 0 and crop_size[1] > 0\n        for key in results.get('img_fields', ['img']):\n            img = results[key]\n            margin_h = max(img.shape[0] - crop_size[0], 0)\n            margin_w = max(img.shape[1] - crop_size[1], 0)\n            offset_h = np.random.randint(0, margin_h + 1)\n            offset_w = np.random.randint(0, margin_w + 1)\n            crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n            crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n            # crop the image\n            img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n            img_shape = img.shape\n            results[key] = img\n        results['img_shape'] = img_shape\n\n        # crop bboxes accordingly and clip to the image boundary\n        for key in results.get('bbox_fields', []):\n            # e.g. gt_bboxes and gt_bboxes_ignore\n            bbox_offset = np.array([offset_w, offset_h, offset_w, offset_h],\n                                   dtype=np.float32)\n            bboxes = results[key] - bbox_offset\n            if self.bbox_clip_border:\n                bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, img_shape[1])\n                bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, img_shape[0])\n            valid_inds = (bboxes[:, 2] > bboxes[:, 0]) & (\n                bboxes[:, 3] > bboxes[:, 1])\n            # If the crop does not contain any gt-bbox area and\n            # allow_negative_crop is False, skip this image.\n            if (key == 'gt_bboxes' and not valid_inds.any()\n                    and not allow_negative_crop):\n                return None\n            results[key] = bboxes[valid_inds, :]\n            # label fields. e.g. gt_labels and gt_labels_ignore\n            label_key = self.bbox2label.get(key)\n            if label_key in results:\n                results[label_key] = results[label_key][valid_inds]\n\n            # mask fields, e.g. 
gt_masks and gt_masks_ignore\n            mask_key = self.bbox2mask.get(key)\n            if mask_key in results:\n                results[mask_key] = results[mask_key][\n                    valid_inds.nonzero()[0]].crop(\n                        np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n                if self.recompute_bbox:\n                    results[key] = results[mask_key].get_bboxes()\n\n        # crop semantic seg\n        for key in results.get('seg_fields', []):\n            results[key] = results[key][crop_y1:crop_y2, crop_x1:crop_x2]\n\n        return results\n\n    def _get_crop_size(self, image_size):\n        \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n        `image_size`.\n\n        Args:\n            image_size (tuple): (h, w).\n\n        Returns:\n            crop_size (tuple): (crop_h, crop_w) in absolute pixels.\n        \"\"\"\n        h, w = image_size\n        if self.crop_type == 'absolute':\n            return (min(self.crop_size[0], h), min(self.crop_size[1], w))\n        elif self.crop_type == 'absolute_range':\n            assert self.crop_size[0] <= self.crop_size[1]\n            crop_h = np.random.randint(\n                min(h, self.crop_size[0]),\n                min(h, self.crop_size[1]) + 1)\n            crop_w = np.random.randint(\n                min(w, self.crop_size[0]),\n                min(w, self.crop_size[1]) + 1)\n            return crop_h, crop_w\n        elif self.crop_type == 'relative':\n            crop_h, crop_w = self.crop_size\n            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n        elif self.crop_type == 'relative_range':\n            crop_size = np.asarray(self.crop_size, dtype=np.float32)\n            crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n    def __call__(self, results):\n        \"\"\"Call function to randomly crop images, bounding boxes, masks,\n        semantic segmentation maps.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Randomly cropped results, 'img_shape' key in result dict is\n                updated according to crop size.\n        \"\"\"\n        image_size = results['img'].shape[:2]\n        crop_size = self._get_crop_size(image_size)\n        results = self._crop_data(results, crop_size, self.allow_negative_crop)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(crop_size={self.crop_size}, '\n        repr_str += f'crop_type={self.crop_type}, '\n        repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass SegRescale:\n    \"\"\"Rescale semantic segmentation maps.\n\n    Args:\n        scale_factor (float): The scale factor of the final output.\n        backend (str): Image rescale backend, choices are 'cv2' and 'pillow'.\n            These two backends generates slightly different results. 
Defaults\n            to 'cv2'.\n    \"\"\"\n\n    def __init__(self, scale_factor=1, backend='cv2'):\n        self.scale_factor = scale_factor\n        self.backend = backend\n\n    def __call__(self, results):\n        \"\"\"Call function to scale the semantic segmentation map.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with semantic segmentation map scaled.\n        \"\"\"\n\n        for key in results.get('seg_fields', []):\n            if self.scale_factor != 1:\n                results[key] = mmcv.imrescale(\n                    results[key],\n                    self.scale_factor,\n                    interpolation='nearest',\n                    backend=self.backend)\n        return results\n\n    def __repr__(self):\n        return self.__class__.__name__ + f'(scale_factor={self.scale_factor})'\n\n\n@PIPELINES.register_module()\nclass PhotoMetricDistortion:\n    \"\"\"Apply photometric distortion to image sequentially, every transformation\n    is applied with a probability of 0.5. The position of random contrast is in\n    second or second to last.\n\n    1. random brightness\n    2. random contrast (mode 0)\n    3. convert color from BGR to HSV\n    4. random saturation\n    5. random hue\n    6. convert color from HSV to BGR\n    7. random contrast (mode 1)\n    8. randomly swap channels\n\n    Args:\n        brightness_delta (int): delta of brightness.\n        contrast_range (tuple): range of contrast.\n        saturation_range (tuple): range of saturation.\n        hue_delta (int): delta of hue.\n    \"\"\"\n\n    def __init__(self,\n                 brightness_delta=32,\n                 contrast_range=(0.5, 1.5),\n                 saturation_range=(0.5, 1.5),\n                 hue_delta=18):\n        self.brightness_delta = brightness_delta\n        self.contrast_lower, self.contrast_upper = contrast_range\n        self.saturation_lower, self.saturation_upper = saturation_range\n        self.hue_delta = hue_delta\n\n    def __call__(self, results):\n        \"\"\"Call function to perform photometric distortion on images.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images distorted.\n        \"\"\"\n\n        if 'img_fields' in results:\n            assert results['img_fields'] == ['img'], \\\n                'Only single img_fields is allowed'\n        img = results['img']\n        img = img.astype(np.float32)\n        # random brightness\n        if random.randint(2):\n            delta = random.uniform(-self.brightness_delta,\n                                   self.brightness_delta)\n            img += delta\n\n        # mode == 0 --> do random contrast first\n        # mode == 1 --> do random contrast last\n        mode = random.randint(2)\n        if mode == 1:\n            if random.randint(2):\n                alpha = random.uniform(self.contrast_lower,\n                                       self.contrast_upper)\n                img *= alpha\n\n        # convert color from BGR to HSV\n        img = mmcv.bgr2hsv(img)\n\n        # random saturation\n        if random.randint(2):\n            img[..., 1] *= random.uniform(self.saturation_lower,\n                                          self.saturation_upper)\n\n        # random hue\n        if random.randint(2):\n            img[..., 0] += random.uniform(-self.hue_delta, self.hue_delta)\n            img[..., 0][img[..., 0] > 360] -= 360\n 
           img[..., 0][img[..., 0] < 0] += 360\n\n        # convert color from HSV to BGR\n        img = mmcv.hsv2bgr(img)\n\n        # random contrast\n        if mode == 0:\n            if random.randint(2):\n                alpha = random.uniform(self.contrast_lower,\n                                       self.contrast_upper)\n                img *= alpha\n\n        # randomly swap channels\n        if random.randint(2):\n            img = img[..., random.permutation(3)]\n\n        results['img'] = img\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(\\nbrightness_delta={self.brightness_delta},\\n'\n        repr_str += 'contrast_range='\n        repr_str += f'{(self.contrast_lower, self.contrast_upper)},\\n'\n        repr_str += 'saturation_range='\n        repr_str += f'{(self.saturation_lower, self.saturation_upper)},\\n'\n        repr_str += f'hue_delta={self.hue_delta})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Expand:\n    \"\"\"Random expand the image & bboxes.\n\n    Randomly place the original image on a canvas of 'ratio' x original image\n    size filled with mean values. The ratio is in the range of ratio_range.\n\n    Args:\n        mean (tuple): mean value of dataset.\n        to_rgb (bool): if need to convert the order of mean to align with RGB.\n        ratio_range (tuple): range of expand ratio.\n        prob (float): probability of applying this transformation\n    \"\"\"\n\n    def __init__(self,\n                 mean=(0, 0, 0),\n                 to_rgb=True,\n                 ratio_range=(1, 4),\n                 seg_ignore_label=None,\n                 prob=0.5):\n        self.to_rgb = to_rgb\n        self.ratio_range = ratio_range\n        if to_rgb:\n            self.mean = mean[::-1]\n        else:\n            self.mean = mean\n        self.min_ratio, self.max_ratio = ratio_range\n        self.seg_ignore_label = seg_ignore_label\n        self.prob = prob\n\n    def __call__(self, results):\n        \"\"\"Call function to expand images, bounding boxes.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images, bounding boxes expanded\n        \"\"\"\n\n        if random.uniform(0, 1) > self.prob:\n            return results\n\n        if 'img_fields' in results:\n            assert results['img_fields'] == ['img'], \\\n                'Only single img_fields is allowed'\n        img = results['img']\n\n        h, w, c = img.shape\n        ratio = random.uniform(self.min_ratio, self.max_ratio)\n        # speedup expand when meets large image\n        if np.all(self.mean == self.mean[0]):\n            expand_img = np.empty((int(h * ratio), int(w * ratio), c),\n                                  img.dtype)\n            expand_img.fill(self.mean[0])\n        else:\n            expand_img = np.full((int(h * ratio), int(w * ratio), c),\n                                 self.mean,\n                                 dtype=img.dtype)\n        left = int(random.uniform(0, w * ratio - w))\n        top = int(random.uniform(0, h * ratio - h))\n        expand_img[top:top + h, left:left + w] = img\n\n        results['img'] = expand_img\n        # expand bboxes\n        for key in results.get('bbox_fields', []):\n            results[key] = results[key] + np.tile(\n                (left, top), 2).astype(results[key].dtype)\n\n        # expand masks\n        for key in results.get('mask_fields', []):\n    
        results[key] = results[key].expand(\n                int(h * ratio), int(w * ratio), top, left)\n\n        # expand segs\n        for key in results.get('seg_fields', []):\n            gt_seg = results[key]\n            expand_gt_seg = np.full((int(h * ratio), int(w * ratio)),\n                                    self.seg_ignore_label,\n                                    dtype=gt_seg.dtype)\n            expand_gt_seg[top:top + h, left:left + w] = gt_seg\n            results[key] = expand_gt_seg\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'seg_ignore_label={self.seg_ignore_label})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass MinIoURandomCrop:\n    \"\"\"Random crop the image & bboxes, the cropped patches have minimum IoU\n    requirement with original image & bboxes, the IoU threshold is randomly\n    selected from min_ious.\n\n    Args:\n        min_ious (tuple): minimum IoU threshold for all intersections with\n        bounding boxes\n        min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,\n        where a >= min_crop_size).\n        bbox_clip_border (bool, optional): Whether clip the objects outside\n            the border of the image. Defaults to True.\n\n    Note:\n        The keys for bboxes, labels and masks should be paired. That is, \\\n        `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and \\\n        `gt_bboxes_ignore` to `gt_labels_ignore` and `gt_masks_ignore`.\n    \"\"\"\n\n    def __init__(self,\n                 min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),\n                 min_crop_size=0.3,\n                 bbox_clip_border=True):\n        # 1: return ori img\n        self.min_ious = min_ious\n        self.sample_mode = (1, *min_ious, 0)\n        self.min_crop_size = min_crop_size\n        self.bbox_clip_border = bbox_clip_border\n        self.bbox2label = {\n            'gt_bboxes': 'gt_labels',\n            'gt_bboxes_ignore': 'gt_labels_ignore'\n        }\n        self.bbox2mask = {\n            'gt_bboxes': 'gt_masks',\n            'gt_bboxes_ignore': 'gt_masks_ignore'\n        }\n\n    def __call__(self, results):\n        \"\"\"Call function to crop images and bounding boxes with minimum IoU\n        constraint.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images and bounding boxes cropped, \\\n                'img_shape' key is updated.\n        \"\"\"\n\n        if 'img_fields' in results:\n            assert results['img_fields'] == ['img'], \\\n                'Only single img_fields is allowed'\n        img = results['img']\n        assert 'bbox_fields' in results\n        boxes = [results[key] for key in results['bbox_fields']]\n        boxes = np.concatenate(boxes, 0)\n        h, w, c = img.shape\n        while True:\n            mode = random.choice(self.sample_mode)\n            self.mode = mode\n            if mode == 1:\n                return results\n\n            min_iou = mode\n            for i in range(50):\n                new_w = random.uniform(self.min_crop_size * w, w)\n                new_h = random.uniform(self.min_crop_size * h, h)\n\n                # h / w in [0.5, 2]\n                if new_h / new_w < 0.5 or new_h / new_w > 2:\n                    continue\n\n                left = random.uniform(w - 
new_w)\n                top = random.uniform(h - new_h)\n\n                patch = np.array(\n                    (int(left), int(top), int(left + new_w), int(top + new_h)))\n                # Line or point crop is not allowed\n                if patch[2] == patch[0] or patch[3] == patch[1]:\n                    continue\n                overlaps = bbox_overlaps(\n                    patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n                if len(overlaps) > 0 and overlaps.min() < min_iou:\n                    continue\n\n                # center of boxes should inside the crop img\n                # only adjust boxes and instance masks when the gt is not empty\n                if len(overlaps) > 0:\n                    # adjust boxes\n                    def is_center_of_bboxes_in_patch(boxes, patch):\n                        center = (boxes[:, :2] + boxes[:, 2:]) / 2\n                        mask = ((center[:, 0] > patch[0]) *\n                                (center[:, 1] > patch[1]) *\n                                (center[:, 0] < patch[2]) *\n                                (center[:, 1] < patch[3]))\n                        return mask\n\n                    mask = is_center_of_bboxes_in_patch(boxes, patch)\n                    if not mask.any():\n                        continue\n                    for key in results.get('bbox_fields', []):\n                        boxes = results[key].copy()\n                        mask = is_center_of_bboxes_in_patch(boxes, patch)\n                        boxes = boxes[mask]\n                        if self.bbox_clip_border:\n                            boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n                            boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n                        boxes -= np.tile(patch[:2], 2)\n\n                        results[key] = boxes\n                        # labels\n                        label_key = self.bbox2label.get(key)\n                        if label_key in results:\n                            results[label_key] = results[label_key][mask]\n\n                        # mask fields\n                        mask_key = self.bbox2mask.get(key)\n                        if mask_key in results:\n                            results[mask_key] = results[mask_key][\n                                mask.nonzero()[0]].crop(patch)\n                # adjust the img no matter whether the gt is empty before crop\n                img = img[patch[1]:patch[3], patch[0]:patch[2]]\n                results['img'] = img\n                results['img_shape'] = img.shape\n\n                # seg fields\n                for key in results.get('seg_fields', []):\n                    results[key] = results[key][patch[1]:patch[3],\n                                                patch[0]:patch[2]]\n                return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(min_ious={self.min_ious}, '\n        repr_str += f'min_crop_size={self.min_crop_size}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Corrupt:\n    \"\"\"Corruption augmentation.\n\n    Corruption transforms implemented based on\n    `imagecorruptions <https://github.com/bethgelab/imagecorruptions>`_.\n\n    Args:\n        corruption (str): Corruption name.\n        severity (int, optional): The severity of corruption. 
Default: 1.\n    \"\"\"\n\n    def __init__(self, corruption, severity=1):\n        self.corruption = corruption\n        self.severity = severity\n\n    def __call__(self, results):\n        \"\"\"Call function to corrupt image.\n\n        Args:\n            results (dict): Result dict from loading pipeline.\n\n        Returns:\n            dict: Result dict with images corrupted.\n        \"\"\"\n\n        if corrupt is None:\n            raise RuntimeError('imagecorruptions is not installed')\n        if 'img_fields' in results:\n            assert results['img_fields'] == ['img'], \\\n                'Only single img_fields is allowed'\n        results['img'] = corrupt(\n            results['img'].astype(np.uint8),\n            corruption_name=self.corruption,\n            severity=self.severity)\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(corruption={self.corruption}, '\n        repr_str += f'severity={self.severity})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Albu:\n    \"\"\"Albumentation augmentation.\n\n    Adds custom transformations from Albumentations library.\n    Please, visit `https://albumentations.readthedocs.io`\n    to get more information.\n\n    An example of ``transforms`` is as followed:\n\n    .. code-block::\n\n        [\n            dict(\n                type='ShiftScaleRotate',\n                shift_limit=0.0625,\n                scale_limit=0.0,\n                rotate_limit=0,\n                interpolation=1,\n                p=0.5),\n            dict(\n                type='RandomBrightnessContrast',\n                brightness_limit=[0.1, 0.3],\n                contrast_limit=[0.1, 0.3],\n                p=0.2),\n            dict(type='ChannelShuffle', p=0.1),\n            dict(\n                type='OneOf',\n                transforms=[\n                    dict(type='Blur', blur_limit=3, p=1.0),\n                    dict(type='MedianBlur', blur_limit=3, p=1.0)\n                ],\n                p=0.1),\n        ]\n\n    Args:\n        transforms (list[dict]): A list of albu transformations\n        bbox_params (dict): Bbox_params for albumentation `Compose`\n        keymap (dict): Contains {'input key':'albumentation-style key'}\n        skip_img_without_anno (bool): Whether to skip the image if no ann left\n            after aug\n    \"\"\"\n\n    def __init__(self,\n                 transforms,\n                 bbox_params=None,\n                 keymap=None,\n                 update_pad_shape=False,\n                 skip_img_without_anno=False):\n        if Compose is None:\n            raise RuntimeError('albumentations is not installed')\n\n        # Args will be modified later, copying it will be safer\n        transforms = copy.deepcopy(transforms)\n        if bbox_params is not None:\n            bbox_params = copy.deepcopy(bbox_params)\n        if keymap is not None:\n            keymap = copy.deepcopy(keymap)\n        self.transforms = transforms\n        self.filter_lost_elements = False\n        self.update_pad_shape = update_pad_shape\n        self.skip_img_without_anno = skip_img_without_anno\n\n        # A simple workaround to remove masks without boxes\n        if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params\n                and 'filter_lost_elements' in bbox_params):\n            self.filter_lost_elements = True\n            self.origin_label_fields = bbox_params['label_fields']\n            
bbox_params['label_fields'] = ['idx_mapper']\n            del bbox_params['filter_lost_elements']\n\n        self.bbox_params = (\n            self.albu_builder(bbox_params) if bbox_params else None)\n        self.aug = Compose([self.albu_builder(t) for t in self.transforms],\n                           bbox_params=self.bbox_params)\n\n        if not keymap:\n            self.keymap_to_albu = {\n                'img': 'image',\n                'gt_masks': 'masks',\n                'gt_bboxes': 'bboxes'\n            }\n        else:\n            self.keymap_to_albu = keymap\n        self.keymap_back = {v: k for k, v in self.keymap_to_albu.items()}\n\n    def albu_builder(self, cfg):\n        \"\"\"Import a module from albumentations.\n\n        It inherits some of :func:`build_from_cfg` logic.\n\n        Args:\n            cfg (dict): Config dict. It should at least contain the key \"type\".\n\n        Returns:\n            obj: The constructed object.\n        \"\"\"\n\n        assert isinstance(cfg, dict) and 'type' in cfg\n        args = cfg.copy()\n\n        obj_type = args.pop('type')\n        if mmcv.is_str(obj_type):\n            if albumentations is None:\n                raise RuntimeError('albumentations is not installed')\n            obj_cls = getattr(albumentations, obj_type)\n        elif inspect.isclass(obj_type):\n            obj_cls = obj_type\n        else:\n            raise TypeError(\n                f'type must be a str or valid type, but got {type(obj_type)}')\n\n        if 'transforms' in args:\n            args['transforms'] = [\n                self.albu_builder(transform)\n                for transform in args['transforms']\n            ]\n\n        return obj_cls(**args)\n\n    @staticmethod\n    def mapper(d, keymap):\n        \"\"\"Dictionary mapper. 
Renames keys according to keymap provided.\n\n        Args:\n            d (dict): old dict\n            keymap (dict): {'old_key':'new_key'}\n        Returns:\n            dict: new dict.\n        \"\"\"\n\n        updated_dict = {}\n        for k, v in zip(d.keys(), d.values()):\n            new_k = keymap.get(k, k)\n            updated_dict[new_k] = d[k]\n        return updated_dict\n\n    def __call__(self, results):\n        # dict to albumentations format\n        results = self.mapper(results, self.keymap_to_albu)\n        # TODO: add bbox_fields\n        if 'bboxes' in results:\n            # to list of boxes\n            if isinstance(results['bboxes'], np.ndarray):\n                results['bboxes'] = [x for x in results['bboxes']]\n            # add pseudo-field for filtration\n            if self.filter_lost_elements:\n                results['idx_mapper'] = np.arange(len(results['bboxes']))\n\n        # TODO: Support mask structure in albu\n        if 'masks' in results:\n            if isinstance(results['masks'], PolygonMasks):\n                raise NotImplementedError(\n                    'Albu only supports BitMap masks now')\n            ori_masks = results['masks']\n            if albumentations.__version__ < '0.5':\n                results['masks'] = results['masks'].masks\n            else:\n                results['masks'] = [mask for mask in results['masks'].masks]\n\n        results = self.aug(**results)\n\n        if 'bboxes' in results:\n            if isinstance(results['bboxes'], list):\n                results['bboxes'] = np.array(\n                    results['bboxes'], dtype=np.float32)\n            results['bboxes'] = results['bboxes'].reshape(-1, 4)\n\n            # filter label_fields\n            if self.filter_lost_elements:\n\n                for label in self.origin_label_fields:\n                    results[label] = np.array(\n                        [results[label][i] for i in results['idx_mapper']])\n                if 'masks' in results:\n                    results['masks'] = np.array(\n                        [results['masks'][i] for i in results['idx_mapper']])\n                    results['masks'] = ori_masks.__class__(\n                        results['masks'], results['image'].shape[0],\n                        results['image'].shape[1])\n\n                if (not len(results['idx_mapper'])\n                        and self.skip_img_without_anno):\n                    return None\n\n        if 'gt_labels' in results:\n            if isinstance(results['gt_labels'], list):\n                results['gt_labels'] = np.array(results['gt_labels'])\n            results['gt_labels'] = results['gt_labels'].astype(np.int64)\n\n        # back to the original format\n        results = self.mapper(results, self.keymap_back)\n\n        # update final shape\n        if self.update_pad_shape:\n            results['pad_shape'] = results['img'].shape\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass RandomCenterCropPad:\n    \"\"\"Random center crop and random around padding for CornerNet.\n\n    This operation generates randomly cropped image from the original image and\n    pads it simultaneously. Different from :class:`RandomCrop`, the output\n    shape may not equal to ``crop_size`` strictly. 
We choose a random value\n    from ``ratios`` and the output shape could be larger or smaller than\n    ``crop_size``. The padding operation is also different from :class:`Pad`,\n    here we use around padding instead of right-bottom padding.\n\n    The relation between output image (padding image) and original image:\n\n    .. code:: text\n\n                        output image\n\n               +----------------------------+\n               |          padded area       |\n        +------|----------------------------|----------+\n        |      |         cropped area       |          |\n        |      |         +---------------+  |          |\n        |      |         |    .   center |  |          | original image\n        |      |         |        range  |  |          |\n        |      |         +---------------+  |          |\n        +------|----------------------------|----------+\n               |          padded area       |\n               +----------------------------+\n\n    There are 5 main areas in the figure:\n\n    - output image: output image of this operation, also called padding\n      image in following instruction.\n    - original image: input image of this operation.\n    - padded area: non-intersect area of output image and original image.\n    - cropped area: the overlap of output image and original image.\n    - center range: a smaller area where random center chosen from.\n      center range is computed by ``border`` and original image's shape\n      to avoid our random center is too close to original image's border.\n\n    Also this operation act differently in train and test mode, the summary\n    pipeline is listed below.\n\n    Train pipeline:\n\n    1. Choose a ``random_ratio`` from ``ratios``, the shape of padding image\n       will be ``random_ratio * crop_size``.\n    2. Choose a ``random_center`` in center range.\n    3. Generate padding image with center matches the ``random_center``.\n    4. Initialize the padding image with pixel value equals to ``mean``.\n    5. Copy the cropped area to padding image.\n    6. Refine annotations.\n\n    Test pipeline:\n\n    1. Compute output shape according to ``test_pad_mode``.\n    2. Generate padding image with center matches the original image\n       center.\n    3. Initialize the padding image with pixel value equals to ``mean``.\n    4. Copy the ``cropped area`` to padding image.\n\n    Args:\n        crop_size (tuple | None): expected size after crop, final size will\n            computed according to ratio. Requires (h, w) in train mode, and\n            None in test mode.\n        ratios (tuple): random select a ratio from tuple and crop image to\n            (crop_size[0] * ratio) * (crop_size[1] * ratio).\n            Only available in train mode.\n        border (int): max distance from center select area to image border.\n            Only available in train mode.\n        mean (sequence): Mean values of 3 channels.\n        std (sequence): Std values of 3 channels.\n        to_rgb (bool): Whether to convert the image from BGR to RGB.\n        test_mode (bool): whether involve random variables in transform.\n            In train mode, crop_size is fixed, center coords and ratio is\n            random selected from predefined lists. In test mode, crop_size\n            is image's original shape, center coords and ratio is fixed.\n        test_pad_mode (tuple): padding method and padding shape value, only\n            available in test mode. 
Default is using 'logical_or' with\n            127 as padding shape value.\n\n            - 'logical_or': final_shape = input_shape | padding_shape_value\n            - 'size_divisor': final_shape = int(\n              ceil(input_shape / padding_shape_value) * padding_shape_value)\n        test_pad_add_pix (int): Extra padding pixel in test mode. Default 0.\n        bbox_clip_border (bool, optional): Whether clip the objects outside\n            the border of the image. Defaults to True.\n    \"\"\"\n\n    def __init__(self,\n                 crop_size=None,\n                 ratios=(0.9, 1.0, 1.1),\n                 border=128,\n                 mean=None,\n                 std=None,\n                 to_rgb=None,\n                 test_mode=False,\n                 test_pad_mode=('logical_or', 127),\n                 test_pad_add_pix=0,\n                 bbox_clip_border=True):\n        if test_mode:\n            assert crop_size is None, 'crop_size must be None in test mode'\n            assert ratios is None, 'ratios must be None in test mode'\n            assert border is None, 'border must be None in test mode'\n            assert isinstance(test_pad_mode, (list, tuple))\n            assert test_pad_mode[0] in ['logical_or', 'size_divisor']\n        else:\n            assert isinstance(crop_size, (list, tuple))\n            assert crop_size[0] > 0 and crop_size[1] > 0, (\n                'crop_size must > 0 in train mode')\n            assert isinstance(ratios, (list, tuple))\n            assert test_pad_mode is None, (\n                'test_pad_mode must be None in train mode')\n\n        self.crop_size = crop_size\n        self.ratios = ratios\n        self.border = border\n        # We do not set default value to mean, std and to_rgb because these\n        # hyper-parameters are easy to forget but could affect the performance.\n        # Please use the same setting as Normalize for performance assurance.\n        assert mean is not None and std is not None and to_rgb is not None\n        self.to_rgb = to_rgb\n        self.input_mean = mean\n        self.input_std = std\n        if to_rgb:\n            self.mean = mean[::-1]\n            self.std = std[::-1]\n        else:\n            self.mean = mean\n            self.std = std\n        self.test_mode = test_mode\n        self.test_pad_mode = test_pad_mode\n        self.test_pad_add_pix = test_pad_add_pix\n        self.bbox_clip_border = bbox_clip_border\n\n    def _get_border(self, border, size):\n        \"\"\"Get final border for the target size.\n\n        This function generates a ``final_border`` according to image's shape.\n        The area between ``final_border`` and ``size - final_border`` is the\n        ``center range``. 
We randomly choose a center from the\n        ``center range`` to avoid the random center being too close to the\n        original image's border. The ``center range`` should also be larger\n        than 0.\n\n        Args:\n            border (int): The initial border, default is 128.\n            size (int): The width or height of the original image.\n        Returns:\n            int: The final border.\n        \"\"\"\n        k = 2 * border / size\n        i = pow(2, np.ceil(np.log2(np.ceil(k))) + (k == int(k)))\n        return border // i\n\n    def _filter_boxes(self, patch, boxes):\n        \"\"\"Check whether the center of each box is in the patch.\n\n        Args:\n            patch (list[int]): The cropped area, [left, top, right, bottom].\n            boxes (numpy array, (N x 4)): Ground truth boxes.\n\n        Returns:\n            mask (numpy array, (N,)): Each box is inside or outside the patch.\n        \"\"\"\n        center = (boxes[:, :2] + boxes[:, 2:]) / 2\n        mask = (center[:, 0] > patch[0]) * (center[:, 1] > patch[1]) * (\n            center[:, 0] < patch[2]) * (\n                center[:, 1] < patch[3])\n        return mask\n\n    def _crop_image_and_paste(self, image, center, size):\n        \"\"\"Crop image with a given center and size, then paste the cropped\n        image to a blank image with the two centers aligned.\n\n        This function is equivalent to generating a blank image with ``size``\n        as its shape. Then cover it on the original image with the two\n        centers (the center of the blank image and the random center of the\n        original image) aligned. The overlap area is pasted from the original\n        image and the outside area is filled with ``mean pixel``.\n\n        Args:\n            image (np array, H x W x C): Original image.\n            center (list[int]): Target crop center coord.\n            size (list[int]): Target crop size. 
[target_h, target_w]\n\n        Returns:\n            cropped_img (np array, target_h x target_w x C): Cropped image.\n            border (np array, 4): The distance of four border of\n                ``cropped_img`` to the original image area, [top, bottom,\n                left, right]\n            patch (list[int]): The cropped area, [left, top, right, bottom].\n        \"\"\"\n        center_y, center_x = center\n        target_h, target_w = size\n        img_h, img_w, img_c = image.shape\n\n        x0 = max(0, center_x - target_w // 2)\n        x1 = min(center_x + target_w // 2, img_w)\n        y0 = max(0, center_y - target_h // 2)\n        y1 = min(center_y + target_h // 2, img_h)\n        patch = np.array((int(x0), int(y0), int(x1), int(y1)))\n\n        left, right = center_x - x0, x1 - center_x\n        top, bottom = center_y - y0, y1 - center_y\n\n        cropped_center_y, cropped_center_x = target_h // 2, target_w // 2\n        cropped_img = np.zeros((target_h, target_w, img_c), dtype=image.dtype)\n        for i in range(img_c):\n            cropped_img[:, :, i] += self.mean[i]\n        y_slice = slice(cropped_center_y - top, cropped_center_y + bottom)\n        x_slice = slice(cropped_center_x - left, cropped_center_x + right)\n        cropped_img[y_slice, x_slice, :] = image[y0:y1, x0:x1, :]\n\n        border = np.array([\n            cropped_center_y - top, cropped_center_y + bottom,\n            cropped_center_x - left, cropped_center_x + right\n        ],\n                          dtype=np.float32)\n\n        return cropped_img, border, patch\n\n    def _train_aug(self, results):\n        \"\"\"Random crop and around padding the original image.\n\n        Args:\n            results (dict): Image infomations in the augment pipeline.\n\n        Returns:\n            results (dict): The updated dict.\n        \"\"\"\n        img = results['img']\n        h, w, c = img.shape\n        boxes = results['gt_bboxes']\n        while True:\n            scale = random.choice(self.ratios)\n            new_h = int(self.crop_size[0] * scale)\n            new_w = int(self.crop_size[1] * scale)\n            h_border = self._get_border(self.border, h)\n            w_border = self._get_border(self.border, w)\n\n            for i in range(50):\n                center_x = random.randint(low=w_border, high=w - w_border)\n                center_y = random.randint(low=h_border, high=h - h_border)\n\n                cropped_img, border, patch = self._crop_image_and_paste(\n                    img, [center_y, center_x], [new_h, new_w])\n\n                mask = self._filter_boxes(patch, boxes)\n                # if image do not have valid bbox, any crop patch is valid.\n                if not mask.any() and len(boxes) > 0:\n                    continue\n\n                results['img'] = cropped_img\n                results['img_shape'] = cropped_img.shape\n                results['pad_shape'] = cropped_img.shape\n\n                x0, y0, x1, y1 = patch\n\n                left_w, top_h = center_x - x0, center_y - y0\n                cropped_center_x, cropped_center_y = new_w // 2, new_h // 2\n\n                # crop bboxes accordingly and clip to the image boundary\n                for key in results.get('bbox_fields', []):\n                    mask = self._filter_boxes(patch, results[key])\n                    bboxes = results[key][mask]\n                    bboxes[:, 0:4:2] += cropped_center_x - left_w - x0\n                    bboxes[:, 1:4:2] += cropped_center_y - top_h - y0\n                   
 if self.bbox_clip_border:\n                        bboxes[:, 0:4:2] = np.clip(bboxes[:, 0:4:2], 0, new_w)\n                        bboxes[:, 1:4:2] = np.clip(bboxes[:, 1:4:2], 0, new_h)\n                    keep = (bboxes[:, 2] > bboxes[:, 0]) & (\n                        bboxes[:, 3] > bboxes[:, 1])\n                    bboxes = bboxes[keep]\n                    results[key] = bboxes\n                    if key in ['gt_bboxes']:\n                        if 'gt_labels' in results:\n                            labels = results['gt_labels'][mask]\n                            labels = labels[keep]\n                            results['gt_labels'] = labels\n                        if 'gt_masks' in results:\n                            raise NotImplementedError(\n                                'RandomCenterCropPad only supports bbox.')\n\n                # crop semantic seg\n                for key in results.get('seg_fields', []):\n                    raise NotImplementedError(\n                        'RandomCenterCropPad only supports bbox.')\n                return results\n\n    def _test_aug(self, results):\n        \"\"\"Around padding the original image without cropping.\n\n        The padding mode and value are from ``test_pad_mode``.\n\n        Args:\n            results (dict): Image infomations in the augment pipeline.\n\n        Returns:\n            results (dict): The updated dict.\n        \"\"\"\n        img = results['img']\n        h, w, c = img.shape\n        results['img_shape'] = img.shape\n        if self.test_pad_mode[0] in ['logical_or']:\n            # self.test_pad_add_pix is only used for centernet\n            target_h = (h | self.test_pad_mode[1]) + self.test_pad_add_pix\n            target_w = (w | self.test_pad_mode[1]) + self.test_pad_add_pix\n        elif self.test_pad_mode[0] in ['size_divisor']:\n            divisor = self.test_pad_mode[1]\n            target_h = int(np.ceil(h / divisor)) * divisor\n            target_w = int(np.ceil(w / divisor)) * divisor\n        else:\n            raise NotImplementedError(\n                'RandomCenterCropPad only support two testing pad mode:'\n                'logical-or and size_divisor.')\n\n        cropped_img, border, _ = self._crop_image_and_paste(\n            img, [h // 2, w // 2], [target_h, target_w])\n        results['img'] = cropped_img\n        results['pad_shape'] = cropped_img.shape\n        results['border'] = border\n        return results\n\n    def __call__(self, results):\n        img = results['img']\n        assert img.dtype == np.float32, (\n            'RandomCenterCropPad needs the input image of dtype np.float32,'\n            ' please set \"to_float32=True\" in \"LoadImageFromFile\" pipeline')\n        h, w, c = img.shape\n        assert c == len(self.mean)\n        if self.test_mode:\n            return self._test_aug(results)\n        else:\n            return self._train_aug(results)\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(crop_size={self.crop_size}, '\n        repr_str += f'ratios={self.ratios}, '\n        repr_str += f'border={self.border}, '\n        repr_str += f'mean={self.input_mean}, '\n        repr_str += f'std={self.input_std}, '\n        repr_str += f'to_rgb={self.to_rgb}, '\n        repr_str += f'test_mode={self.test_mode}, '\n        repr_str += f'test_pad_mode={self.test_pad_mode}, '\n        repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass 
CutOut:\n    \"\"\"CutOut operation.\n\n    Randomly drop some regions of image used in\n    `Cutout <https://arxiv.org/abs/1708.04552>`_.\n\n    Args:\n        n_holes (int | tuple[int, int]): Number of regions to be dropped.\n            If it is given as a list, number of holes will be randomly\n            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].\n        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate\n            shape of dropped regions. It can be `tuple[int, int]` to use a\n            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose\n            shape from the list.\n        cutout_ratio (tuple[float, float] | list[tuple[float, float]]): The\n            candidate ratio of dropped regions. It can be `tuple[float, float]`\n            to use a fixed ratio or `list[tuple[float, float]]` to randomly\n            choose ratio from the list. Please note that `cutout_shape`\n            and `cutout_ratio` cannot be both given at the same time.\n        fill_in (tuple[float, float, float] | tuple[int, int, int]): The value\n            of pixel to fill in the dropped regions. Default: (0, 0, 0).\n    \"\"\"\n\n    def __init__(self,\n                 n_holes,\n                 cutout_shape=None,\n                 cutout_ratio=None,\n                 fill_in=(0, 0, 0)):\n\n        assert (cutout_shape is None) ^ (cutout_ratio is None), \\\n            'Either cutout_shape or cutout_ratio should be specified.'\n        assert (isinstance(cutout_shape, (list, tuple))\n                or isinstance(cutout_ratio, (list, tuple)))\n        if isinstance(n_holes, tuple):\n            assert len(n_holes) == 2 and 0 <= n_holes[0] < n_holes[1]\n        else:\n            n_holes = (n_holes, n_holes)\n        self.n_holes = n_holes\n        self.fill_in = fill_in\n        self.with_ratio = cutout_ratio is not None\n        self.candidates = cutout_ratio if self.with_ratio else cutout_shape\n        if not isinstance(self.candidates, list):\n            self.candidates = [self.candidates]\n\n    def __call__(self, results):\n        \"\"\"Call function to drop some regions of image.\"\"\"\n        h, w, c = results['img'].shape\n        n_holes = np.random.randint(self.n_holes[0], self.n_holes[1] + 1)\n        for _ in range(n_holes):\n            x1 = np.random.randint(0, w)\n            y1 = np.random.randint(0, h)\n            index = np.random.randint(0, len(self.candidates))\n            if not self.with_ratio:\n                cutout_w, cutout_h = self.candidates[index]\n            else:\n                cutout_w = int(self.candidates[index][0] * w)\n                cutout_h = int(self.candidates[index][1] * h)\n\n            x2 = np.clip(x1 + cutout_w, 0, w)\n            y2 = np.clip(y1 + cutout_h, 0, h)\n            results['img'][y1:y2, x1:x2, :] = self.fill_in\n\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(n_holes={self.n_holes}, '\n        repr_str += (f'cutout_ratio={self.candidates}, ' if self.with_ratio\n                     else f'cutout_shape={self.candidates}, ')\n        repr_str += f'fill_in={self.fill_in})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass Mosaic:\n    \"\"\"Mosaic augmentation.\n\n    Given 4 images, mosaic transform combines them into\n    one output image. The output image is composed of the parts from each sub-\n    image.\n\n    .. 
code:: text\n\n                        mosaic transform\n                           center_x\n                +------------------------------+\n                |       pad        |  pad      |\n                |      +-----------+           |\n                |      |           |           |\n                |      |  image1   |--------+  |\n                |      |           |        |  |\n                |      |           | image2 |  |\n     center_y   |----+-------------+-----------|\n                |    |   cropped   |           |\n                |pad |   image3    |  image4   |\n                |    |             |           |\n                +----|-------------+-----------+\n                     |             |\n                     +-------------+\n\n     The mosaic transform steps are as follows:\n\n         1. Choose the mosaic center as the intersections of 4 images\n         2. Get the left top image according to the index, and randomly\n            sample another 3 images from the custom dataset.\n         3. Sub image will be cropped if image is larger than mosaic patch\n\n    Args:\n        img_scale (Sequence[int]): Image size after mosaic pipeline of single\n            image. The shape order should be (height, width).\n            Default to (640, 640).\n        center_ratio_range (Sequence[float]): Center ratio range of mosaic\n            output. Default to (0.5, 1.5).\n        min_bbox_size (int | float): The minimum pixel for filtering\n            invalid bboxes after the mosaic pipeline. Default to 0.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        skip_filter (bool): Whether to skip filtering rules. If it\n            is True, the filter rule will not be applied, and the\n            `min_bbox_size` is invalid. Default to True.\n        pad_val (int): Pad value. Default to 114.\n        prob (float): Probability of applying this transformation.\n            Default to 1.0.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale=(640, 640),\n                 center_ratio_range=(0.5, 1.5),\n                 min_bbox_size=0,\n                 bbox_clip_border=True,\n                 skip_filter=True,\n                 pad_val=114,\n                 prob=1.0):\n        assert isinstance(img_scale, tuple)\n        assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. 
'\\\n            f'got {prob}.'\n\n        log_img_scale(img_scale, skip_square=True)\n        self.img_scale = img_scale\n        self.center_ratio_range = center_ratio_range\n        self.min_bbox_size = min_bbox_size\n        self.bbox_clip_border = bbox_clip_border\n        self.skip_filter = skip_filter\n        self.pad_val = pad_val\n        self.prob = prob\n\n    def __call__(self, results):\n        \"\"\"Call function to make a mosaic of image.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Result dict with mosaic transformed.\n        \"\"\"\n\n        if random.uniform(0, 1) > self.prob:\n            return results\n\n        results = self._mosaic_transform(results)\n        return results\n\n    def get_indexes(self, dataset):\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n\n        Returns:\n            list: indexes.\n        \"\"\"\n\n        indexes = [random.randint(0, len(dataset)) for _ in range(3)]\n        return indexes\n\n    def _mosaic_transform(self, results):\n        \"\"\"Mosaic transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n\n        assert 'mix_results' in results\n        mosaic_labels = []\n        mosaic_bboxes = []\n        if len(results['img'].shape) == 3:\n            mosaic_img = np.full(\n                (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2), 3),\n                self.pad_val,\n                dtype=results['img'].dtype)\n        else:\n            mosaic_img = np.full(\n                (int(self.img_scale[0] * 2), int(self.img_scale[1] * 2)),\n                self.pad_val,\n                dtype=results['img'].dtype)\n\n        # mosaic center x, y\n        center_x = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[1])\n        center_y = int(\n            random.uniform(*self.center_ratio_range) * self.img_scale[0])\n        center_position = (center_x, center_y)\n\n        loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n        for i, loc in enumerate(loc_strs):\n            if loc == 'top_left':\n                results_patch = copy.deepcopy(results)\n            else:\n                results_patch = copy.deepcopy(results['mix_results'][i - 1])\n\n            img_i = results_patch['img']\n            h_i, w_i = img_i.shape[:2]\n            # keep_ratio resize\n            scale_ratio_i = min(self.img_scale[0] / h_i,\n                                self.img_scale[1] / w_i)\n            img_i = mmcv.imresize(\n                img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n            # compute the combine parameters\n            paste_coord, crop_coord = self._mosaic_combine(\n                loc, center_position, img_i.shape[:2][::-1])\n            x1_p, y1_p, x2_p, y2_p = paste_coord\n            x1_c, y1_c, x2_c, y2_c = crop_coord\n\n            # crop and paste image\n            mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n            # adjust coordinate\n            gt_bboxes_i = results_patch['gt_bboxes']\n            gt_labels_i = results_patch['gt_labels']\n\n            if gt_bboxes_i.shape[0] > 0:\n                padw = x1_p - x1_c\n                padh = y1_p - y1_c\n                gt_bboxes_i[:, 0::2] = \\\n                    scale_ratio_i * gt_bboxes_i[:, 0::2] + padw\n               
 gt_bboxes_i[:, 1::2] = \\\n                    scale_ratio_i * gt_bboxes_i[:, 1::2] + padh\n\n            mosaic_bboxes.append(gt_bboxes_i)\n            mosaic_labels.append(gt_labels_i)\n\n        if len(mosaic_labels) > 0:\n            mosaic_bboxes = np.concatenate(mosaic_bboxes, 0)\n            mosaic_labels = np.concatenate(mosaic_labels, 0)\n\n            if self.bbox_clip_border:\n                mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0,\n                                                 2 * self.img_scale[1])\n                mosaic_bboxes[:, 1::2] = np.clip(mosaic_bboxes[:, 1::2], 0,\n                                                 2 * self.img_scale[0])\n\n            if not self.skip_filter:\n                mosaic_bboxes, mosaic_labels = \\\n                    self._filter_box_candidates(mosaic_bboxes, mosaic_labels)\n\n        # remove outside bboxes\n        inside_inds = find_inside_bboxes(mosaic_bboxes, 2 * self.img_scale[0],\n                                         2 * self.img_scale[1])\n        mosaic_bboxes = mosaic_bboxes[inside_inds]\n        mosaic_labels = mosaic_labels[inside_inds]\n\n        results['img'] = mosaic_img\n        results['img_shape'] = mosaic_img.shape\n        results['gt_bboxes'] = mosaic_bboxes\n        results['gt_labels'] = mosaic_labels\n\n        return results\n\n    def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):\n        \"\"\"Calculate global coordinate of mosaic image and local coordinate of\n        cropped sub-image.\n\n        Args:\n            loc (str): Index for the sub-image, loc in ('top_left',\n              'top_right', 'bottom_left', 'bottom_right').\n            center_position_xy (Sequence[float]): Mixing center for 4 images,\n                (x, y).\n            img_shape_wh (Sequence[int]): Width and height of sub-image\n\n        Returns:\n            tuple[tuple[float]]: Corresponding coordinate of pasting and\n                cropping\n                - paste_coord (tuple): paste corner coordinate in mosaic image.\n                - crop_coord (tuple): crop corner coordinate in mosaic image.\n        \"\"\"\n        assert loc in ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n        if loc == 'top_left':\n            # index0 to top left part of image\n            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n                             max(center_position_xy[1] - img_shape_wh[1], 0), \\\n                             center_position_xy[0], \\\n                             center_position_xy[1]\n            crop_coord = img_shape_wh[0] - (x2 - x1), img_shape_wh[1] - (\n                y2 - y1), img_shape_wh[0], img_shape_wh[1]\n\n        elif loc == 'top_right':\n            # index1 to top right part of image\n            x1, y1, x2, y2 = center_position_xy[0], \\\n                             max(center_position_xy[1] - img_shape_wh[1], 0), \\\n                             min(center_position_xy[0] + img_shape_wh[0],\n                                 self.img_scale[1] * 2), \\\n                             center_position_xy[1]\n            crop_coord = 0, img_shape_wh[1] - (y2 - y1), min(\n                img_shape_wh[0], x2 - x1), img_shape_wh[1]\n\n        elif loc == 'bottom_left':\n            # index2 to bottom left part of image\n            x1, y1, x2, y2 = max(center_position_xy[0] - img_shape_wh[0], 0), \\\n                             center_position_xy[1], \\\n                             center_position_xy[0], \\\n                 
            min(self.img_scale[0] * 2, center_position_xy[1] +\n                                 img_shape_wh[1])\n            crop_coord = img_shape_wh[0] - (x2 - x1), 0, img_shape_wh[0], min(\n                y2 - y1, img_shape_wh[1])\n\n        else:\n            # index3 to bottom right part of image\n            x1, y1, x2, y2 = center_position_xy[0], \\\n                             center_position_xy[1], \\\n                             min(center_position_xy[0] + img_shape_wh[0],\n                                 self.img_scale[1] * 2), \\\n                             min(self.img_scale[0] * 2, center_position_xy[1] +\n                                 img_shape_wh[1])\n            crop_coord = 0, 0, min(img_shape_wh[0],\n                                   x2 - x1), min(y2 - y1, img_shape_wh[1])\n\n        paste_coord = x1, y1, x2, y2\n        return paste_coord, crop_coord\n\n    def _filter_box_candidates(self, bboxes, labels):\n        \"\"\"Filter out bboxes too small after Mosaic.\"\"\"\n        bbox_w = bboxes[:, 2] - bboxes[:, 0]\n        bbox_h = bboxes[:, 3] - bboxes[:, 1]\n        valid_inds = (bbox_w > self.min_bbox_size) & \\\n                     (bbox_h > self.min_bbox_size)\n        valid_inds = np.nonzero(valid_inds)[0]\n        return bboxes[valid_inds], labels[valid_inds]\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'img_scale={self.img_scale}, '\n        repr_str += f'center_ratio_range={self.center_ratio_range}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'min_bbox_size={self.min_bbox_size}, '\n        repr_str += f'skip_filter={self.skip_filter})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass MixUp:\n    \"\"\"MixUp data augmentation.\n\n    .. code:: text\n\n                         mixup transform\n                +------------------------------+\n                | mixup image   |              |\n                |      +--------|--------+     |\n                |      |        |        |     |\n                |---------------+        |     |\n                |      |                 |     |\n                |      |      image      |     |\n                |      |                 |     |\n                |      |                 |     |\n                |      |-----------------+     |\n                |             pad              |\n                +------------------------------+\n\n     The mixup transform steps are as follows:\n\n        1. Another random image is picked by dataset and embedded in\n           the top left patch(after padding and resizing)\n        2. The target of mixup transform is the weighted average of mixup\n           image and origin image.\n\n    Args:\n        img_scale (Sequence[int]): Image output size after mixup pipeline.\n            The shape order should be (height, width). Default: (640, 640).\n        ratio_range (Sequence[float]): Scale ratio of mixup image.\n            Default: (0.5, 1.5).\n        flip_ratio (float): Horizontal flip ratio of mixup image.\n            Default: 0.5.\n        pad_val (int): Pad value. Default: 114.\n        max_iters (int): The maximum number of iterations. If the number of\n            iterations is greater than `max_iters`, but gt_bbox is still\n            empty, then the iteration is terminated. 
Default: 15.\n        min_bbox_size (float): Width and height threshold to filter bboxes.\n            If the height or width of a box is smaller than this value, it\n            will be removed. Default: 5.\n        min_area_ratio (float): Threshold of area ratio between\n            original bboxes and wrapped bboxes. If smaller than this value,\n            the box will be removed. Default: 0.2.\n        max_aspect_ratio (float): Aspect ratio of width and height\n            threshold to filter bboxes. If max(h/w, w/h) larger than this\n            value, the box will be removed. Default: 20.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        skip_filter (bool): Whether to skip filtering rules. If it\n            is True, the filter rule will not be applied, and the\n            `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio`\n            is invalid. Default to True.\n    \"\"\"\n\n    def __init__(self,\n                 img_scale=(640, 640),\n                 ratio_range=(0.5, 1.5),\n                 flip_ratio=0.5,\n                 pad_val=114,\n                 max_iters=15,\n                 min_bbox_size=5,\n                 min_area_ratio=0.2,\n                 max_aspect_ratio=20,\n                 bbox_clip_border=True,\n                 skip_filter=True):\n        assert isinstance(img_scale, tuple)\n        log_img_scale(img_scale, skip_square=True)\n        self.dynamic_scale = img_scale\n        self.ratio_range = ratio_range\n        self.flip_ratio = flip_ratio\n        self.pad_val = pad_val\n        self.max_iters = max_iters\n        self.min_bbox_size = min_bbox_size\n        self.min_area_ratio = min_area_ratio\n        self.max_aspect_ratio = max_aspect_ratio\n        self.bbox_clip_border = bbox_clip_border\n        self.skip_filter = skip_filter\n\n    def __call__(self, results):\n        \"\"\"Call function to make a mixup of image.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Result dict with mixup transformed.\n        \"\"\"\n\n        results = self._mixup_transform(results)\n        return results\n\n    def get_indexes(self, dataset):\n        \"\"\"Call function to collect indexes.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n\n        Returns:\n            list: indexes.\n        \"\"\"\n\n        for i in range(self.max_iters):\n            index = random.randint(0, len(dataset))\n            gt_bboxes_i = dataset.get_ann_info(index)['bboxes']\n            if len(gt_bboxes_i) != 0:\n                break\n\n        return index\n\n    def _mixup_transform(self, results):\n        \"\"\"MixUp transform function.\n\n        Args:\n            results (dict): Result dict.\n\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n\n        assert 'mix_results' in results\n        assert len(\n            results['mix_results']) == 1, 'MixUp only support 2 images now !'\n\n        if results['mix_results'][0]['gt_bboxes'].shape[0] == 0:\n            # empty bbox\n            return results\n\n        retrieve_results = results['mix_results'][0]\n        retrieve_img = retrieve_results['img']\n\n        jit_factor = random.uniform(*self.ratio_range)\n        is_filp = 
random.uniform(0, 1) < self.flip_ratio\n\n        if len(retrieve_img.shape) == 3:\n            out_img = np.ones(\n                (self.dynamic_scale[0], self.dynamic_scale[1], 3),\n                dtype=retrieve_img.dtype) * self.pad_val\n        else:\n            out_img = np.ones(\n                self.dynamic_scale, dtype=retrieve_img.dtype) * self.pad_val\n\n        # 1. keep_ratio resize\n        scale_ratio = min(self.dynamic_scale[0] / retrieve_img.shape[0],\n                          self.dynamic_scale[1] / retrieve_img.shape[1])\n        retrieve_img = mmcv.imresize(\n            retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n                           int(retrieve_img.shape[0] * scale_ratio)))\n\n        # 2. paste\n        out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n        # 3. scale jit\n        scale_ratio *= jit_factor\n        out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n                                          int(out_img.shape[0] * jit_factor)))\n\n        # 4. flip\n        if is_filp:\n            out_img = out_img[:, ::-1, :]\n\n        # 5. random crop\n        ori_img = results['img']\n        origin_h, origin_w = out_img.shape[:2]\n        target_h, target_w = ori_img.shape[:2]\n        padded_img = np.zeros(\n            (max(origin_h, target_h), max(origin_w,\n                                          target_w), 3)).astype(np.uint8)\n        padded_img[:origin_h, :origin_w] = out_img\n\n        x_offset, y_offset = 0, 0\n        if padded_img.shape[0] > target_h:\n            y_offset = random.randint(0, padded_img.shape[0] - target_h)\n        if padded_img.shape[1] > target_w:\n            x_offset = random.randint(0, padded_img.shape[1] - target_w)\n        padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n                                        x_offset:x_offset + target_w]\n\n        # 6. adjust bbox\n        retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n        retrieve_gt_bboxes[:, 0::2] = retrieve_gt_bboxes[:, 0::2] * scale_ratio\n        retrieve_gt_bboxes[:, 1::2] = retrieve_gt_bboxes[:, 1::2] * scale_ratio\n        if self.bbox_clip_border:\n            retrieve_gt_bboxes[:, 0::2] = np.clip(retrieve_gt_bboxes[:, 0::2],\n                                                  0, origin_w)\n            retrieve_gt_bboxes[:, 1::2] = np.clip(retrieve_gt_bboxes[:, 1::2],\n                                                  0, origin_h)\n\n        if is_filp:\n            retrieve_gt_bboxes[:, 0::2] = (\n                origin_w - retrieve_gt_bboxes[:, 0::2][:, ::-1])\n\n        # 7. filter\n        cp_retrieve_gt_bboxes = retrieve_gt_bboxes.copy()\n        cp_retrieve_gt_bboxes[:, 0::2] = \\\n            cp_retrieve_gt_bboxes[:, 0::2] - x_offset\n        cp_retrieve_gt_bboxes[:, 1::2] = \\\n            cp_retrieve_gt_bboxes[:, 1::2] - y_offset\n        if self.bbox_clip_border:\n            cp_retrieve_gt_bboxes[:, 0::2] = np.clip(\n                cp_retrieve_gt_bboxes[:, 0::2], 0, target_w)\n            cp_retrieve_gt_bboxes[:, 1::2] = np.clip(\n                cp_retrieve_gt_bboxes[:, 1::2], 0, target_h)\n\n        # 8. 
mix up\n        ori_img = ori_img.astype(np.float32)\n        mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n        retrieve_gt_labels = retrieve_results['gt_labels']\n        if not self.skip_filter:\n            keep_list = self._filter_box_candidates(retrieve_gt_bboxes.T,\n                                                    cp_retrieve_gt_bboxes.T)\n\n            retrieve_gt_labels = retrieve_gt_labels[keep_list]\n            cp_retrieve_gt_bboxes = cp_retrieve_gt_bboxes[keep_list]\n\n        mixup_gt_bboxes = np.concatenate(\n            (results['gt_bboxes'], cp_retrieve_gt_bboxes), axis=0)\n        mixup_gt_labels = np.concatenate(\n            (results['gt_labels'], retrieve_gt_labels), axis=0)\n\n        # remove outside bbox\n        inside_inds = find_inside_bboxes(mixup_gt_bboxes, target_h, target_w)\n        mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n        mixup_gt_labels = mixup_gt_labels[inside_inds]\n\n        results['img'] = mixup_img.astype(np.uint8)\n        results['img_shape'] = mixup_img.shape\n        results['gt_bboxes'] = mixup_gt_bboxes\n        results['gt_labels'] = mixup_gt_labels\n\n        return results\n\n    def _filter_box_candidates(self, bbox1, bbox2):\n        \"\"\"Compute candidate boxes which include following 5 things:\n\n        bbox1 before augment, bbox2 after augment, min_bbox_size (pixels),\n        min_area_ratio, max_aspect_ratio.\n        \"\"\"\n\n        w1, h1 = bbox1[2] - bbox1[0], bbox1[3] - bbox1[1]\n        w2, h2 = bbox2[2] - bbox2[0], bbox2[3] - bbox2[1]\n        ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16))\n        return ((w2 > self.min_bbox_size)\n                & (h2 > self.min_bbox_size)\n                & (w2 * h2 / (w1 * h1 + 1e-16) > self.min_area_ratio)\n                & (ar < self.max_aspect_ratio))\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'dynamic_scale={self.dynamic_scale}, '\n        repr_str += f'ratio_range={self.ratio_range}, '\n        repr_str += f'flip_ratio={self.flip_ratio}, '\n        repr_str += f'pad_val={self.pad_val}, '\n        repr_str += f'max_iters={self.max_iters}, '\n        repr_str += f'min_bbox_size={self.min_bbox_size}, '\n        repr_str += f'min_area_ratio={self.min_area_ratio}, '\n        repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, '\n        repr_str += f'skip_filter={self.skip_filter})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass RandomAffine:\n    \"\"\"Random affine transform data augmentation.\n\n    This operation randomly generates affine transform matrix which including\n    rotation, translation, shear and scaling transforms.\n\n    Args:\n        max_rotate_degree (float): Maximum degrees of rotation transform.\n            Default: 10.\n        max_translate_ratio (float): Maximum ratio of translation.\n            Default: 0.1.\n        scaling_ratio_range (tuple[float]): Min and max ratio of\n            scaling transform. Default: (0.5, 1.5).\n        max_shear_degree (float): Maximum degrees of shear\n            transform. Default: 2.\n        border (tuple[int]): Distance from height and width sides of input\n            image to adjust output shape. 
Only used in mosaic dataset.\n            Default: (0, 0).\n        border_val (tuple[int]): Border padding values of 3 channels.\n            Default: (114, 114, 114).\n        min_bbox_size (float): Width and height threshold to filter bboxes.\n            If the height or width of a box is smaller than this value, it\n            will be removed. Default: 2.\n        min_area_ratio (float): Threshold of area ratio between\n            original bboxes and wrapped bboxes. If smaller than this value,\n            the box will be removed. Default: 0.2.\n        max_aspect_ratio (float): Aspect ratio of width and height\n            threshold to filter bboxes. If max(h/w, w/h) larger than this\n            value, the box will be removed.\n        bbox_clip_border (bool, optional): Whether to clip the objects outside\n            the border of the image. In some dataset like MOT17, the gt bboxes\n            are allowed to cross the border of images. Therefore, we don't\n            need to clip the gt bboxes in these cases. Defaults to True.\n        skip_filter (bool): Whether to skip filtering rules. If it\n            is True, the filter rule will not be applied, and the\n            `min_bbox_size` and `min_area_ratio` and `max_aspect_ratio`\n            is invalid. Default to True.\n    \"\"\"\n\n    def __init__(self,\n                 max_rotate_degree=10.0,\n                 max_translate_ratio=0.1,\n                 scaling_ratio_range=(0.5, 1.5),\n                 max_shear_degree=2.0,\n                 border=(0, 0),\n                 border_val=(114, 114, 114),\n                 min_bbox_size=2,\n                 min_area_ratio=0.2,\n                 max_aspect_ratio=20,\n                 bbox_clip_border=True,\n                 skip_filter=True):\n        assert 0 <= max_translate_ratio <= 1\n        assert scaling_ratio_range[0] <= scaling_ratio_range[1]\n        assert scaling_ratio_range[0] > 0\n        self.max_rotate_degree = max_rotate_degree\n        self.max_translate_ratio = max_translate_ratio\n        self.scaling_ratio_range = scaling_ratio_range\n        self.max_shear_degree = max_shear_degree\n        self.border = border\n        self.border_val = border_val\n        self.min_bbox_size = min_bbox_size\n        self.min_area_ratio = min_area_ratio\n        self.max_aspect_ratio = max_aspect_ratio\n        self.bbox_clip_border = bbox_clip_border\n        self.skip_filter = skip_filter\n\n    def __call__(self, results):\n        img = results['img']\n        height = img.shape[0] + self.border[0] * 2\n        width = img.shape[1] + self.border[1] * 2\n\n        # Rotation\n        rotation_degree = random.uniform(-self.max_rotate_degree,\n                                         self.max_rotate_degree)\n        rotation_matrix = self._get_rotation_matrix(rotation_degree)\n\n        # Scaling\n        scaling_ratio = random.uniform(self.scaling_ratio_range[0],\n                                       self.scaling_ratio_range[1])\n        scaling_matrix = self._get_scaling_matrix(scaling_ratio)\n\n        # Shear\n        x_degree = random.uniform(-self.max_shear_degree,\n                                  self.max_shear_degree)\n        y_degree = random.uniform(-self.max_shear_degree,\n                                  self.max_shear_degree)\n        shear_matrix = self._get_shear_matrix(x_degree, y_degree)\n\n        # Translation\n        trans_x = random.uniform(-self.max_translate_ratio,\n                                 self.max_translate_ratio) * width\n       
 trans_y = random.uniform(-self.max_translate_ratio,\n                                 self.max_translate_ratio) * height\n        translate_matrix = self._get_translation_matrix(trans_x, trans_y)\n\n        warp_matrix = (\n            translate_matrix @ shear_matrix @ rotation_matrix @ scaling_matrix)\n\n        img = cv2.warpPerspective(\n            img,\n            warp_matrix,\n            dsize=(width, height),\n            borderValue=self.border_val)\n        results['img'] = img\n        results['img_shape'] = img.shape\n\n        for key in results.get('bbox_fields', []):\n            bboxes = results[key]\n            num_bboxes = len(bboxes)\n            if num_bboxes:\n                # homogeneous coordinates\n                xs = bboxes[:, [0, 0, 2, 2]].reshape(num_bboxes * 4)\n                ys = bboxes[:, [1, 3, 3, 1]].reshape(num_bboxes * 4)\n                ones = np.ones_like(xs)\n                points = np.vstack([xs, ys, ones])\n\n                warp_points = warp_matrix @ points\n                warp_points = warp_points[:2] / warp_points[2]\n                xs = warp_points[0].reshape(num_bboxes, 4)\n                ys = warp_points[1].reshape(num_bboxes, 4)\n\n                warp_bboxes = np.vstack(\n                    (xs.min(1), ys.min(1), xs.max(1), ys.max(1))).T\n\n                if self.bbox_clip_border:\n                    warp_bboxes[:, [0, 2]] = \\\n                        warp_bboxes[:, [0, 2]].clip(0, width)\n                    warp_bboxes[:, [1, 3]] = \\\n                        warp_bboxes[:, [1, 3]].clip(0, height)\n\n                # remove outside bbox\n                valid_index = find_inside_bboxes(warp_bboxes, height, width)\n                if not self.skip_filter:\n                    # filter bboxes\n                    filter_index = self.filter_gt_bboxes(\n                        bboxes * scaling_ratio, warp_bboxes)\n                    valid_index = valid_index & filter_index\n\n                results[key] = warp_bboxes[valid_index]\n                if key in ['gt_bboxes']:\n                    if 'gt_labels' in results:\n                        results['gt_labels'] = results['gt_labels'][\n                            valid_index]\n\n                if 'gt_masks' in results:\n                    raise NotImplementedError(\n                        'RandomAffine only supports bbox.')\n        return results\n\n    def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes):\n        origin_w = origin_bboxes[:, 2] - origin_bboxes[:, 0]\n        origin_h = origin_bboxes[:, 3] - origin_bboxes[:, 1]\n        wrapped_w = wrapped_bboxes[:, 2] - wrapped_bboxes[:, 0]\n        wrapped_h = wrapped_bboxes[:, 3] - wrapped_bboxes[:, 1]\n        aspect_ratio = np.maximum(wrapped_w / (wrapped_h + 1e-16),\n                                  wrapped_h / (wrapped_w + 1e-16))\n\n        wh_valid_idx = (wrapped_w > self.min_bbox_size) & \\\n                       (wrapped_h > self.min_bbox_size)\n        area_valid_idx = wrapped_w * wrapped_h / (origin_w * origin_h +\n                                                  1e-16) > self.min_area_ratio\n        aspect_ratio_valid_idx = aspect_ratio < self.max_aspect_ratio\n        return wh_valid_idx & area_valid_idx & aspect_ratio_valid_idx\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(max_rotate_degree={self.max_rotate_degree}, '\n        repr_str += f'max_translate_ratio={self.max_translate_ratio}, '\n        repr_str += 
f'scaling_ratio={self.scaling_ratio_range}, '\n        repr_str += f'max_shear_degree={self.max_shear_degree}, '\n        repr_str += f'border={self.border}, '\n        repr_str += f'border_val={self.border_val}, '\n        repr_str += f'min_bbox_size={self.min_bbox_size}, '\n        repr_str += f'min_area_ratio={self.min_area_ratio}, '\n        repr_str += f'max_aspect_ratio={self.max_aspect_ratio}, '\n        repr_str += f'skip_filter={self.skip_filter})'\n        return repr_str\n\n    @staticmethod\n    def _get_rotation_matrix(rotate_degrees):\n        radian = math.radians(rotate_degrees)\n        rotation_matrix = np.array(\n            [[np.cos(radian), -np.sin(radian), 0.],\n             [np.sin(radian), np.cos(radian), 0.], [0., 0., 1.]],\n            dtype=np.float32)\n        return rotation_matrix\n\n    @staticmethod\n    def _get_scaling_matrix(scale_ratio):\n        scaling_matrix = np.array(\n            [[scale_ratio, 0., 0.], [0., scale_ratio, 0.], [0., 0., 1.]],\n            dtype=np.float32)\n        return scaling_matrix\n\n    @staticmethod\n    def _get_shear_matrix(x_shear_degrees, y_shear_degrees):\n        x_radian = math.radians(x_shear_degrees)\n        y_radian = math.radians(y_shear_degrees)\n        shear_matrix = np.array([[1, np.tan(x_radian), 0.],\n                                 [np.tan(y_radian), 1, 0.], [0., 0., 1.]],\n                                dtype=np.float32)\n        return shear_matrix\n\n    @staticmethod\n    def _get_translation_matrix(x, y):\n        translation_matrix = np.array([[1, 0., x], [0., 1, y], [0., 0., 1.]],\n                                      dtype=np.float32)\n        return translation_matrix\n\n\n@PIPELINES.register_module()\nclass YOLOXHSVRandomAug:\n    \"\"\"Apply HSV augmentation to the image sequentially. It is referenced from\n    https://github.com/Megvii-\n    BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n    Args:\n        hue_delta (int): delta of hue. Default: 5.\n        saturation_delta (int): delta of saturation. Default: 30.\n        value_delta (int): delta of value. 
Default: 30.\n    \"\"\"\n\n    def __init__(self, hue_delta=5, saturation_delta=30, value_delta=30):\n        self.hue_delta = hue_delta\n        self.saturation_delta = saturation_delta\n        self.value_delta = value_delta\n\n    def __call__(self, results):\n        img = results['img']\n        hsv_gains = np.random.uniform(-1, 1, 3) * [\n            self.hue_delta, self.saturation_delta, self.value_delta\n        ]\n        # random selection of h, s, v\n        hsv_gains *= np.random.randint(0, 2, 3)\n        # prevent overflow\n        hsv_gains = hsv_gains.astype(np.int16)\n        img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n        img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n        img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n        img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n        cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n        results['img'] = img\n        return results\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'(hue_delta={self.hue_delta}, '\n        repr_str += f'saturation_delta={self.saturation_delta}, '\n        repr_str += f'value_delta={self.value_delta})'\n        return repr_str\n\n\n@PIPELINES.register_module()\nclass CopyPaste:\n    \"\"\"Simple Copy-Paste is a Strong Data Augmentation Method for Instance\n    Segmentation The simple copy-paste transform steps are as follows:\n\n    1. The destination image is already resized with aspect ratio kept,\n       cropped and padded.\n    2. Randomly select a source image, which is also already resized\n       with aspect ratio kept, cropped and padded in a similar way\n       as the destination image.\n    3. Randomly select some objects from the source image.\n    4. Paste these source objects to the destination image directly,\n       due to the source and destination image have the same size.\n    5. Update object masks of the destination image, for some origin objects\n       may be occluded.\n    6. Generate bboxes from the updated destination masks and\n       filter some objects which are totally occluded, and adjust bboxes\n       which are partly occluded.\n    7. Append selected source bboxes, masks, and labels.\n\n    Args:\n        max_num_pasted (int): The maximum number of pasted objects.\n            Default: 100.\n        bbox_occluded_thr (int): The threshold of occluded bbox.\n            Default: 10.\n        mask_occluded_thr (int): The threshold of occluded mask.\n            Default: 300.\n        selected (bool): Whether select objects or not. 
If select is False,\n            all objects of the source image will be pasted to the\n            destination image.\n            Default: True.\n    \"\"\"\n\n    def __init__(\n        self,\n        max_num_pasted=100,\n        bbox_occluded_thr=10,\n        mask_occluded_thr=300,\n        selected=True,\n    ):\n        self.max_num_pasted = max_num_pasted\n        self.bbox_occluded_thr = bbox_occluded_thr\n        self.mask_occluded_thr = mask_occluded_thr\n        self.selected = selected\n        self.paste_by_box = False\n\n    def get_indexes(self, dataset):\n        \"\"\"Call function to collect indexes.s.\n\n        Args:\n            dataset (:obj:`MultiImageMixDataset`): The dataset.\n        Returns:\n            list: Indexes.\n        \"\"\"\n        return random.randint(0, len(dataset))\n\n    def gen_masks_from_bboxes(self, bboxes, img_shape):\n        \"\"\"Generate gt_masks based on gt_bboxes.\n\n        Args:\n            bboxes (list): The bboxes's list.\n            img_shape (tuple): The shape of image.\n        Returns:\n            BitmapMasks\n        \"\"\"\n        self.paste_by_box = True\n        img_h, img_w = img_shape[:2]\n        xmin, ymin = bboxes[:, 0:1], bboxes[:, 1:2]\n        xmax, ymax = bboxes[:, 2:3], bboxes[:, 3:4]\n        gt_masks = np.zeros((len(bboxes), img_h, img_w), dtype=np.uint8)\n        for i in range(len(bboxes)):\n            gt_masks[i,\n                     int(ymin[i]):int(ymax[i]),\n                     int(xmin[i]):int(xmax[i])] = 1\n        return BitmapMasks(gt_masks, img_h, img_w)\n\n    def get_gt_masks(self, results):\n        \"\"\"Get gt_masks originally or generated based on bboxes.\n\n        If gt_masks is not contained in results,\n        it will be generated based on gt_bboxes.\n        Args:\n            results (dict): Result dict.\n        Returns:\n            BitmapMasks: gt_masks, originally or generated based on bboxes.\n        \"\"\"\n        if results.get('gt_masks', None) is not None:\n            return results['gt_masks']\n        else:\n            return self.gen_masks_from_bboxes(\n                results.get('gt_bboxes', []), results['img'].shape)\n\n    def __call__(self, results):\n        \"\"\"Call function to make a copy-paste of image.\n\n        Args:\n            results (dict): Result dict.\n        Returns:\n            dict: Result dict with copy-paste transformed.\n        \"\"\"\n\n        assert 'mix_results' in results\n        num_images = len(results['mix_results'])\n        assert num_images == 1, \\\n            f'CopyPaste only supports processing 2 images, got {num_images}'\n\n        # Get gt_masks originally or generated based on bboxes.\n        results['gt_masks'] = self.get_gt_masks(results)\n        # only one mix picture\n        results['mix_results'][0]['gt_masks'] = self.get_gt_masks(\n            results['mix_results'][0])\n\n        if self.selected:\n            selected_results = self._select_object(results['mix_results'][0])\n        else:\n            selected_results = results['mix_results'][0]\n        return self._copy_paste(results, selected_results)\n\n    def _select_object(self, results):\n        \"\"\"Select some objects from the source results.\"\"\"\n        bboxes = results['gt_bboxes']\n        labels = results['gt_labels']\n        masks = results['gt_masks']\n        max_num_pasted = min(bboxes.shape[0] + 1, self.max_num_pasted)\n        num_pasted = np.random.randint(0, max_num_pasted)\n        selected_inds = np.random.choice(\n            
bboxes.shape[0], size=num_pasted, replace=False)\n\n        selected_bboxes = bboxes[selected_inds]\n        selected_labels = labels[selected_inds]\n        selected_masks = masks[selected_inds]\n\n        results['gt_bboxes'] = selected_bboxes\n        results['gt_labels'] = selected_labels\n        results['gt_masks'] = selected_masks\n        return results\n\n    def _copy_paste(self, dst_results, src_results):\n        \"\"\"CopyPaste transform function.\n\n        Args:\n            dst_results (dict): Result dict of the destination image.\n            src_results (dict): Result dict of the source image.\n        Returns:\n            dict: Updated result dict.\n        \"\"\"\n        dst_img = dst_results['img']\n        dst_bboxes = dst_results['gt_bboxes']\n        dst_labels = dst_results['gt_labels']\n        dst_masks = dst_results['gt_masks']\n\n        src_img = src_results['img']\n        src_bboxes = src_results['gt_bboxes']\n        src_labels = src_results['gt_labels']\n        src_masks = src_results['gt_masks']\n\n        if len(src_bboxes) == 0:\n            if self.paste_by_box:\n                dst_results.pop('gt_masks')\n            return dst_results\n\n        # update masks and generate bboxes from updated masks\n        composed_mask = np.where(np.any(src_masks.masks, axis=0), 1, 0)\n        updated_dst_masks = self.get_updated_masks(dst_masks, composed_mask)\n        updated_dst_bboxes = updated_dst_masks.get_bboxes()\n        assert len(updated_dst_bboxes) == len(updated_dst_masks)\n\n        # filter totally occluded objects\n        bboxes_inds = np.all(\n            np.abs(\n                (updated_dst_bboxes - dst_bboxes)) <= self.bbox_occluded_thr,\n            axis=-1)\n        masks_inds = updated_dst_masks.masks.sum(\n            axis=(1, 2)) > self.mask_occluded_thr\n        valid_inds = bboxes_inds | masks_inds\n\n        # Paste source objects to destination image directly\n        img = dst_img * (1 - composed_mask[..., np.newaxis]\n                         ) + src_img * composed_mask[..., np.newaxis]\n        bboxes = np.concatenate([updated_dst_bboxes[valid_inds], src_bboxes])\n        labels = np.concatenate([dst_labels[valid_inds], src_labels])\n        masks = np.concatenate(\n            [updated_dst_masks.masks[valid_inds], src_masks.masks])\n\n        dst_results['img'] = img\n        dst_results['gt_bboxes'] = bboxes\n        dst_results['gt_labels'] = labels\n        if self.paste_by_box:\n            dst_results.pop('gt_masks')\n        else:\n            dst_results['gt_masks'] = BitmapMasks(masks, masks.shape[1],\n                                                  masks.shape[2])\n\n        return dst_results\n\n    def get_updated_masks(self, masks, composed_mask):\n        assert masks.masks.shape[-2:] == composed_mask.shape[-2:], \\\n            'Cannot compare two arrays of different size'\n        masks.masks = np.where(composed_mask, 0, masks.masks)\n        return masks\n\n    def __repr__(self):\n        repr_str = self.__class__.__name__\n        repr_str += f'max_num_pasted={self.max_num_pasted}, '\n        repr_str += f'bbox_occluded_thr={self.bbox_occluded_thr}, '\n        repr_str += f'mask_occluded_thr={self.mask_occluded_thr}, '\n        repr_str += f'selected={self.selected}, '\n        return repr_str\n"
  },
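The heart of CopyPaste._copy_paste is a per-pixel blend controlled by the union of the selected source masks. A minimal numpy sketch of that compositing step, using toy arrays (assumed shapes and values) in place of real images and BitmapMasks:

import numpy as np

# Toy stand-ins (assumed shapes/values) for a destination image, a source
# image and two source instance masks; real inputs come from the pipeline.
dst_img = np.full((4, 4, 3), 10, dtype=np.uint8)
src_img = np.full((4, 4, 3), 200, dtype=np.uint8)
src_masks = np.zeros((2, 4, 4), dtype=np.uint8)
src_masks[0, :2, :2] = 1
src_masks[1, 2:, 2:] = 1

# Union of the selected source instances, as in _copy_paste above.
composed_mask = np.where(np.any(src_masks, axis=0), 1, 0)

# Paste source pixels wherever the composed mask is set; destination pixels
# survive only where no source instance covers them.
img = dst_img * (1 - composed_mask[..., np.newaxis]) + \
    src_img * composed_mask[..., np.newaxis]

print(img[..., 0])  # pasted corners read 200, everywhere else stays 10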
  {
    "path": "mmdet/datasets/samplers/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .class_aware_sampler import ClassAwareSampler\nfrom .distributed_sampler import DistributedSampler\nfrom .group_sampler import DistributedGroupSampler, GroupSampler\nfrom .infinite_sampler import InfiniteBatchSampler, InfiniteGroupBatchSampler\n\n__all__ = [\n    'DistributedSampler', 'DistributedGroupSampler', 'GroupSampler',\n    'InfiniteGroupBatchSampler', 'InfiniteBatchSampler', 'ClassAwareSampler'\n]\n"
  },
  {
    "path": "mmdet/datasets/samplers/class_aware_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom torch.utils.data import Sampler\n\nfrom mmdet.core.utils import sync_random_seed\n\n\nclass ClassAwareSampler(Sampler):\n    r\"\"\"Sampler that restricts data loading to the label of the dataset.\n\n    A class-aware sampling strategy to effectively tackle the\n    non-uniform class distribution. The length of the training data is\n    consistent with source data. Simple improvements based on `Relay\n    Backpropagation for Effective Learning of Deep Convolutional\n    Neural Networks <https://arxiv.org/abs/1512.05830>`_\n\n    The implementation logic is referred to\n    https://github.com/Sense-X/TSD/blob/master/mmdet/datasets/samplers/distributed_classaware_sampler.py\n\n    Args:\n        dataset: Dataset used for sampling.\n        samples_per_gpu (int): When model is :obj:`DistributedDataParallel`,\n            it is the number of training samples on each GPU.\n            When model is :obj:`DataParallel`, it is\n            `num_gpus * samples_per_gpu`.\n            Default : 1.\n        num_replicas (optional): Number of processes participating in\n            distributed training.\n        rank (optional): Rank of the current process within num_replicas.\n        seed (int, optional): random seed used to shuffle the sampler if\n            ``shuffle=True``. This number should be identical across all\n            processes in the distributed group. Default: 0.\n        num_sample_class (int): The number of samples taken from each\n            per-label list. Default: 1\n    \"\"\"\n\n    def __init__(self,\n                 dataset,\n                 samples_per_gpu=1,\n                 num_replicas=None,\n                 rank=None,\n                 seed=0,\n                 num_sample_class=1):\n        _rank, _num_replicas = get_dist_info()\n        if num_replicas is None:\n            num_replicas = _num_replicas\n        if rank is None:\n            rank = _rank\n\n        self.dataset = dataset\n        self.num_replicas = num_replicas\n        self.samples_per_gpu = samples_per_gpu\n        self.rank = rank\n        self.epoch = 0\n        # Must be the same across all workers. 
If None, will use a\n        # random seed shared among workers\n        # (require synchronization among all workers)\n        self.seed = sync_random_seed(seed)\n\n        # The number of samples taken from each per-label list\n        assert num_sample_class > 0 and isinstance(num_sample_class, int)\n        self.num_sample_class = num_sample_class\n        # Get per-label image list from dataset\n        assert hasattr(dataset, 'get_cat2imgs'), \\\n            'dataset must have `get_cat2imgs` function'\n        self.cat_dict = dataset.get_cat2imgs()\n\n        self.num_samples = int(\n            math.ceil(\n                len(self.dataset) * 1.0 / self.num_replicas /\n                self.samples_per_gpu)) * self.samples_per_gpu\n        self.total_size = self.num_samples * self.num_replicas\n\n        # get number of images containing each category\n        self.num_cat_imgs = [len(x) for x in self.cat_dict.values()]\n        # filter labels without images\n        self.valid_cat_inds = [\n            i for i, length in enumerate(self.num_cat_imgs) if length != 0\n        ]\n        self.num_classes = len(self.valid_cat_inds)\n\n    def __iter__(self):\n        # deterministically shuffle based on epoch\n        g = torch.Generator()\n        g.manual_seed(self.epoch + self.seed)\n\n        # initialize label list\n        label_iter_list = RandomCycleIter(self.valid_cat_inds, generator=g)\n        # initialize each per-label image list\n        data_iter_dict = dict()\n        for i in self.valid_cat_inds:\n            data_iter_dict[i] = RandomCycleIter(self.cat_dict[i], generator=g)\n\n        def gen_cat_img_inds(cls_list, data_dict, num_sample_cls):\n            \"\"\"Traverse the categories and extract `num_sample_cls` image\n            indexes of the corresponding categories one by one.\"\"\"\n            id_indices = []\n            for _ in range(len(cls_list)):\n                cls_idx = next(cls_list)\n                for _ in range(num_sample_cls):\n                    id = next(data_dict[cls_idx])\n                    id_indices.append(id)\n            return id_indices\n\n        # deterministically shuffle based on epoch\n        num_bins = int(\n            math.ceil(self.total_size * 1.0 / self.num_classes /\n                      self.num_sample_class))\n        indices = []\n        for i in range(num_bins):\n            indices += gen_cat_img_inds(label_iter_list, data_iter_dict,\n                                        self.num_sample_class)\n\n        # fix extra samples to make it evenly divisible\n        if len(indices) >= self.total_size:\n            indices = indices[:self.total_size]\n        else:\n            indices += indices[:(self.total_size - len(indices))]\n        assert len(indices) == self.total_size\n\n        # subsample\n        offset = self.num_samples * self.rank\n        indices = indices[offset:offset + self.num_samples]\n        assert len(indices) == self.num_samples\n\n        return iter(indices)\n\n    def __len__(self):\n        return self.num_samples\n\n    def set_epoch(self, epoch):\n        self.epoch = epoch\n\n\nclass RandomCycleIter:\n    \"\"\"Shuffle the list and do it again after the list have traversed.\n\n    The implementation logic is referred to\n    https://github.com/wutong16/DistributionBalancedLoss/blob/master/mllt/datasets/loader/sampler.py\n\n    Example:\n        >>> label_list = [0, 1, 2, 4, 5]\n        >>> g = torch.Generator()\n        >>> g.manual_seed(0)\n        >>> label_iter_list = 
RandomCycleIter(label_list, generator=g)\n        >>> index = next(label_iter_list)\n    Args:\n        data (list or ndarray): The data that needs to be shuffled.\n        generator: A torch.Generator object, which is used in setting the seed\n            for generating random numbers.\n    \"\"\"  # noqa: W605\n\n    def __init__(self, data, generator=None):\n        self.data = data\n        self.length = len(data)\n        self.index = torch.randperm(self.length, generator=generator).numpy()\n        self.i = 0\n        self.generator = generator\n\n    def __iter__(self):\n        return self\n\n    def __len__(self):\n        return len(self.data)\n\n    def __next__(self):\n        if self.i == self.length:\n            self.index = torch.randperm(\n                self.length, generator=self.generator).numpy()\n            self.i = 0\n        idx = self.data[self.index[self.i]]\n        self.i += 1\n        return idx\n"
  },
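ClassAwareSampler balances classes by cycling over the valid categories and pulling num_sample_class image indices from each per-category list in turn. A simplified, dependency-free sketch of that round-robin idea, using a hypothetical cat2imgs mapping and itertools.cycle in place of the shuffled RandomCycleIter:

from itertools import cycle

# Hypothetical per-category image lists (category id -> image indices);
# the real mapping comes from dataset.get_cat2imgs().
cat2imgs = {0: [0, 1, 2], 1: [3], 2: [4, 5]}
num_sample_class = 1

cat_iter = cycle(sorted(cat2imgs))
img_iters = {cat: cycle(imgs) for cat, imgs in cat2imgs.items()}

indices = []
for _ in range(9):  # draw nine samples for illustration
    cat = next(cat_iter)
    for _ in range(num_sample_class):
        indices.append(next(img_iters[cat]))

print(indices)  # [0, 3, 4, 1, 3, 5, 2, 3, 4]: the rare category 1 repeats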
  {
    "path": "mmdet/datasets/samplers/distributed_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nfrom torch.utils.data import DistributedSampler as _DistributedSampler\n\nfrom mmdet.core.utils import sync_random_seed\nfrom mmdet.utils import get_device\n\n\nclass DistributedSampler(_DistributedSampler):\n\n    def __init__(self,\n                 dataset,\n                 num_replicas=None,\n                 rank=None,\n                 shuffle=True,\n                 seed=0):\n        super().__init__(\n            dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n\n        # In distributed sampling, different ranks should sample\n        # non-overlapped data in the dataset. Therefore, this function\n        # is used to make sure that each rank shuffles the data indices\n        # in the same order based on the same seed. Then different ranks\n        # could use different indices to select non-overlapped data from the\n        # same data list.\n        device = get_device()\n        self.seed = sync_random_seed(seed, device)\n\n    def __iter__(self):\n        # deterministically shuffle based on epoch\n        if self.shuffle:\n            g = torch.Generator()\n            # When :attr:`shuffle=True`, this ensures all replicas\n            # use a different random ordering for each epoch.\n            # Otherwise, the next iteration of this sampler will\n            # yield the same ordering.\n            g.manual_seed(self.epoch + self.seed)\n            indices = torch.randperm(len(self.dataset), generator=g).tolist()\n        else:\n            indices = torch.arange(len(self.dataset)).tolist()\n\n        # add extra samples to make it evenly divisible\n        # in case that indices is shorter than half of total_size\n        indices = (indices *\n                   math.ceil(self.total_size / len(indices)))[:self.total_size]\n        assert len(indices) == self.total_size\n\n        # subsample\n        indices = indices[self.rank:self.total_size:self.num_replicas]\n        assert len(indices) == self.num_samples\n\n        return iter(indices)\n"
  },
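The distributed subsampling boils down to padding the shuffled index list so it divides evenly and then letting each rank take an interleaved slice. A small sketch with assumed toy sizes (10 samples, 4 replicas):

import math

dataset_len, num_replicas = 10, 4                     # assumed toy sizes
num_samples = math.ceil(dataset_len / num_replicas)   # 3 indices per rank
total_size = num_samples * num_replicas               # 12 after padding

indices = list(range(dataset_len))  # stands in for the shuffled permutation
# Repeat the list until it is long enough, then cut, as in __iter__ above.
indices = (indices * math.ceil(total_size / len(indices)))[:total_size]

# Each rank reads a disjoint, interleaved slice of the padded list.
for rank in range(num_replicas):
    print(rank, indices[rank:total_size:num_replicas])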
  {
    "path": "mmdet/datasets/samplers/group_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport numpy as np\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom torch.utils.data import Sampler\n\n\nclass GroupSampler(Sampler):\n\n    def __init__(self, dataset, samples_per_gpu=1):\n        assert hasattr(dataset, 'flag')\n        self.dataset = dataset\n        self.samples_per_gpu = samples_per_gpu\n        self.flag = dataset.flag.astype(np.int64)\n        self.group_sizes = np.bincount(self.flag)\n        self.num_samples = 0\n        for i, size in enumerate(self.group_sizes):\n            self.num_samples += int(np.ceil(\n                size / self.samples_per_gpu)) * self.samples_per_gpu\n\n    def __iter__(self):\n        indices = []\n        for i, size in enumerate(self.group_sizes):\n            if size == 0:\n                continue\n            indice = np.where(self.flag == i)[0]\n            assert len(indice) == size\n            np.random.shuffle(indice)\n            num_extra = int(np.ceil(size / self.samples_per_gpu)\n                            ) * self.samples_per_gpu - len(indice)\n            indice = np.concatenate(\n                [indice, np.random.choice(indice, num_extra)])\n            indices.append(indice)\n        indices = np.concatenate(indices)\n        indices = [\n            indices[i * self.samples_per_gpu:(i + 1) * self.samples_per_gpu]\n            for i in np.random.permutation(\n                range(len(indices) // self.samples_per_gpu))\n        ]\n        indices = np.concatenate(indices)\n        indices = indices.astype(np.int64).tolist()\n        assert len(indices) == self.num_samples\n        return iter(indices)\n\n    def __len__(self):\n        return self.num_samples\n\n\nclass DistributedGroupSampler(Sampler):\n    \"\"\"Sampler that restricts data loading to a subset of the dataset.\n\n    It is especially useful in conjunction with\n    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each\n    process can pass a DistributedSampler instance as a DataLoader sampler,\n    and load a subset of the original dataset that is exclusive to it.\n\n    .. note::\n        Dataset is assumed to be of constant size.\n\n    Arguments:\n        dataset: Dataset used for sampling.\n        num_replicas (optional): Number of processes participating in\n            distributed training.\n        rank (optional): Rank of the current process within num_replicas.\n        seed (int, optional): random seed used to shuffle the sampler if\n            ``shuffle=True``. This number should be identical across all\n            processes in the distributed group. 
Default: 0.\n    \"\"\"\n\n    def __init__(self,\n                 dataset,\n                 samples_per_gpu=1,\n                 num_replicas=None,\n                 rank=None,\n                 seed=0):\n        _rank, _num_replicas = get_dist_info()\n        if num_replicas is None:\n            num_replicas = _num_replicas\n        if rank is None:\n            rank = _rank\n        self.dataset = dataset\n        self.samples_per_gpu = samples_per_gpu\n        self.num_replicas = num_replicas\n        self.rank = rank\n        self.epoch = 0\n        self.seed = seed if seed is not None else 0\n\n        assert hasattr(self.dataset, 'flag')\n        self.flag = self.dataset.flag\n        self.group_sizes = np.bincount(self.flag)\n\n        self.num_samples = 0\n        for i, j in enumerate(self.group_sizes):\n            self.num_samples += int(\n                math.ceil(self.group_sizes[i] * 1.0 / self.samples_per_gpu /\n                          self.num_replicas)) * self.samples_per_gpu\n        self.total_size = self.num_samples * self.num_replicas\n\n    def __iter__(self):\n        # deterministically shuffle based on epoch\n        g = torch.Generator()\n        g.manual_seed(self.epoch + self.seed)\n\n        indices = []\n        for i, size in enumerate(self.group_sizes):\n            if size > 0:\n                indice = np.where(self.flag == i)[0]\n                assert len(indice) == size\n                # add .numpy() to avoid bug when selecting indice in parrots.\n                # TODO: check whether torch.randperm() can be replaced by\n                # numpy.random.permutation().\n                indice = indice[list(\n                    torch.randperm(int(size), generator=g).numpy())].tolist()\n                extra = int(\n                    math.ceil(\n                        size * 1.0 / self.samples_per_gpu / self.num_replicas)\n                ) * self.samples_per_gpu * self.num_replicas - len(indice)\n                # pad indice\n                tmp = indice.copy()\n                for _ in range(extra // size):\n                    indice.extend(tmp)\n                indice.extend(tmp[:extra % size])\n                indices.extend(indice)\n\n        assert len(indices) == self.total_size\n\n        indices = [\n            indices[j] for i in list(\n                torch.randperm(\n                    len(indices) // self.samples_per_gpu, generator=g))\n            for j in range(i * self.samples_per_gpu, (i + 1) *\n                           self.samples_per_gpu)\n        ]\n\n        # subsample\n        offset = self.num_samples * self.rank\n        indices = indices[offset:offset + self.num_samples]\n        assert len(indices) == self.num_samples\n\n        return iter(indices)\n\n    def __len__(self):\n        return self.num_samples\n\n    def set_epoch(self, epoch):\n        self.epoch = epoch\n"
  },
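Both samplers above rely on the dataset's aspect-ratio flag so that every mini-batch is drawn from a single group. A dependency-light numpy sketch of that grouping and padding step, with a hypothetical flag array and batch size:

import numpy as np

flag = np.array([0, 0, 0, 1, 1, 0, 1])  # hypothetical group flags per image
samples_per_gpu = 2

rng = np.random.default_rng(0)
batches = []
for group in np.unique(flag):
    indice = np.where(flag == group)[0]
    rng.shuffle(indice)
    # Pad the group with repeats so it splits evenly into batches,
    # mirroring GroupSampler.__iter__.
    num_extra = -len(indice) % samples_per_gpu
    if num_extra:
        indice = np.concatenate([indice, rng.choice(indice, num_extra)])
    batches += np.split(indice, len(indice) // samples_per_gpu)

print(batches)  # every batch holds indices from one group only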
  {
    "path": "mmdet/datasets/samplers/infinite_sampler.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\n\nimport numpy as np\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom torch.utils.data.sampler import Sampler\n\nfrom mmdet.core.utils import sync_random_seed\n\n\nclass InfiniteGroupBatchSampler(Sampler):\n    \"\"\"Similar to `BatchSampler` warping a `GroupSampler. It is designed for\n    iteration-based runners like `IterBasedRunner` and yields a mini-batch\n    indices each time, all indices in a batch should be in the same group.\n\n    The implementation logic is referred to\n    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py\n\n    Args:\n        dataset (object): The dataset.\n        batch_size (int): When model is :obj:`DistributedDataParallel`,\n            it is the number of training samples on each GPU.\n            When model is :obj:`DataParallel`, it is\n            `num_gpus * samples_per_gpu`.\n            Default : 1.\n        world_size (int, optional): Number of processes participating in\n            distributed training. Default: None.\n        rank (int, optional): Rank of current process. Default: None.\n        seed (int): Random seed. Default: 0.\n        shuffle (bool): Whether shuffle the indices of a dummy `epoch`, it\n            should be noted that `shuffle` can not guarantee that you can\n            generate sequential indices because it need to ensure\n            that all indices in a batch is in a group. Default: True.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 dataset,\n                 batch_size=1,\n                 world_size=None,\n                 rank=None,\n                 seed=0,\n                 shuffle=True):\n        _rank, _world_size = get_dist_info()\n        if world_size is None:\n            world_size = _world_size\n        if rank is None:\n            rank = _rank\n        self.rank = rank\n        self.world_size = world_size\n        self.dataset = dataset\n        self.batch_size = batch_size\n        # In distributed sampling, different ranks should sample\n        # non-overlapped data in the dataset. Therefore, this function\n        # is used to make sure that each rank shuffles the data indices\n        # in the same order based on the same seed. 
Then different ranks\n        # could use different indices to select non-overlapped data from the\n        # same data list.\n        self.seed = sync_random_seed(seed)\n        self.shuffle = shuffle\n\n        assert hasattr(self.dataset, 'flag')\n        self.flag = self.dataset.flag\n        self.group_sizes = np.bincount(self.flag)\n        # buffer used to save indices of each group\n        self.buffer_per_group = {k: [] for k in range(len(self.group_sizes))}\n\n        self.size = len(dataset)\n        self.indices = self._indices_of_rank()\n\n    def _infinite_indices(self):\n        \"\"\"Infinitely yield a sequence of indices.\"\"\"\n        g = torch.Generator()\n        g.manual_seed(self.seed)\n        while True:\n            if self.shuffle:\n                yield from torch.randperm(self.size, generator=g).tolist()\n\n            else:\n                yield from torch.arange(self.size).tolist()\n\n    def _indices_of_rank(self):\n        \"\"\"Slice the infinite indices by rank.\"\"\"\n        yield from itertools.islice(self._infinite_indices(), self.rank, None,\n                                    self.world_size)\n\n    def __iter__(self):\n        # once batch size is reached, yield the indices\n        for idx in self.indices:\n            flag = self.flag[idx]\n            group_buffer = self.buffer_per_group[flag]\n            group_buffer.append(idx)\n            if len(group_buffer) == self.batch_size:\n                yield group_buffer[:]\n                del group_buffer[:]\n\n    def __len__(self):\n        \"\"\"Length of base dataset.\"\"\"\n        return self.size\n\n    def set_epoch(self, epoch):\n        \"\"\"Not supported in `IterationBased` runner.\"\"\"\n        raise NotImplementedError\n\n\nclass InfiniteBatchSampler(Sampler):\n    \"\"\"Similar to `BatchSampler` warping a `DistributedSampler. It is designed\n    iteration-based runners like `IterBasedRunner` and yields a mini-batch\n    indices each time.\n\n    The implementation logic is referred to\n    https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/samplers/grouped_batch_sampler.py\n\n    Args:\n        dataset (object): The dataset.\n        batch_size (int): When model is :obj:`DistributedDataParallel`,\n            it is the number of training samples on each GPU,\n            When model is :obj:`DataParallel`, it is\n            `num_gpus * samples_per_gpu`.\n            Default : 1.\n        world_size (int, optional): Number of processes participating in\n            distributed training. Default: None.\n        rank (int, optional): Rank of current process. Default: None.\n        seed (int): Random seed. Default: 0.\n        shuffle (bool): Whether shuffle the dataset or not. Default: True.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 dataset,\n                 batch_size=1,\n                 world_size=None,\n                 rank=None,\n                 seed=0,\n                 shuffle=True):\n        _rank, _world_size = get_dist_info()\n        if world_size is None:\n            world_size = _world_size\n        if rank is None:\n            rank = _rank\n        self.rank = rank\n        self.world_size = world_size\n        self.dataset = dataset\n        self.batch_size = batch_size\n        # In distributed sampling, different ranks should sample\n        # non-overlapped data in the dataset. 
Therefore, this function\n        # is used to make sure that each rank shuffles the data indices\n        # in the same order based on the same seed. Then different ranks\n        # could use different indices to select non-overlapped data from the\n        # same data list.\n        self.seed = sync_random_seed(seed)\n        self.shuffle = shuffle\n        self.size = len(dataset)\n        self.indices = self._indices_of_rank()\n\n    def _infinite_indices(self):\n        \"\"\"Infinitely yield a sequence of indices.\"\"\"\n        g = torch.Generator()\n        g.manual_seed(self.seed)\n        while True:\n            if self.shuffle:\n                yield from torch.randperm(self.size, generator=g).tolist()\n\n            else:\n                yield from torch.arange(self.size).tolist()\n\n    def _indices_of_rank(self):\n        \"\"\"Slice the infinite indices by rank.\"\"\"\n        yield from itertools.islice(self._infinite_indices(), self.rank, None,\n                                    self.world_size)\n\n    def __iter__(self):\n        # once batch size is reached, yield the indices\n        batch_buffer = []\n        for idx in self.indices:\n            batch_buffer.append(idx)\n            if len(batch_buffer) == self.batch_size:\n                yield batch_buffer\n                batch_buffer = []\n\n    def __len__(self):\n        \"\"\"Length of base dataset.\"\"\"\n        return self.size\n\n    def set_epoch(self, epoch):\n        \"\"\"Not supported in `IterationBased` runner.\"\"\"\n        raise NotImplementedError\n"
  },
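The infinite samplers never raise StopIteration: each rank slices one endless index stream with itertools.islice and packs it into fixed-size batches. A self-contained sketch of that slicing, with assumed toy values and a plain repeating stream instead of the seeded reshuffle:

import itertools

size, world_size, rank, batch_size = 5, 2, 0, 2  # assumed toy values

def infinite_indices():
    # The real samplers reshuffle with a seeded torch.Generator on every
    # pass; a plain repeat keeps this sketch dependency-free.
    while True:
        yield from range(size)

# Interleave the endless stream across ranks, then group into batches.
rank_stream = itertools.islice(infinite_indices(), rank, None, world_size)
first_batches = [list(itertools.islice(rank_stream, batch_size)) for _ in range(3)]
print(first_batches)  # [[0, 2], [4, 1], [3, 0]] for rank 0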
  {
    "path": "mmdet/datasets/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nfrom mmcv.cnn import VGG\nfrom mmcv.runner.hooks import HOOKS, Hook\n\nfrom mmdet.datasets.builder import PIPELINES\nfrom mmdet.datasets.pipelines import (LoadAnnotations, LoadImageFromFile,\n                                      LoadPanopticAnnotations)\nfrom mmdet.models.dense_heads import GARPNHead, RPNHead\nfrom mmdet.models.roi_heads.mask_heads import FusedSemanticHead\n\n\ndef replace_ImageToTensor(pipelines):\n    \"\"\"Replace the ImageToTensor transform in a data pipeline to\n    DefaultFormatBundle, which is normally useful in batch inference.\n\n    Args:\n        pipelines (list[dict]): Data pipeline configs.\n\n    Returns:\n        list: The new pipeline list with all ImageToTensor replaced by\n            DefaultFormatBundle.\n\n    Examples:\n        >>> pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(\n        ...        type='MultiScaleFlipAug',\n        ...        img_scale=(1333, 800),\n        ...        flip=False,\n        ...        transforms=[\n        ...            dict(type='Resize', keep_ratio=True),\n        ...            dict(type='RandomFlip'),\n        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),\n        ...            dict(type='Pad', size_divisor=32),\n        ...            dict(type='ImageToTensor', keys=['img']),\n        ...            dict(type='Collect', keys=['img']),\n        ...        ])\n        ...    ]\n        >>> expected_pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(\n        ...        type='MultiScaleFlipAug',\n        ...        img_scale=(1333, 800),\n        ...        flip=False,\n        ...        transforms=[\n        ...            dict(type='Resize', keep_ratio=True),\n        ...            dict(type='RandomFlip'),\n        ...            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),\n        ...            dict(type='Pad', size_divisor=32),\n        ...            dict(type='DefaultFormatBundle'),\n        ...            dict(type='Collect', keys=['img']),\n        ...        ])\n        ...    ]\n        >>> assert expected_pipelines == replace_ImageToTensor(pipelines)\n    \"\"\"\n    pipelines = copy.deepcopy(pipelines)\n    for i, pipeline in enumerate(pipelines):\n        if pipeline['type'] == 'MultiScaleFlipAug':\n            assert 'transforms' in pipeline\n            pipeline['transforms'] = replace_ImageToTensor(\n                pipeline['transforms'])\n        elif pipeline['type'] == 'ImageToTensor':\n            warnings.warn(\n                '\"ImageToTensor\" pipeline is replaced by '\n                '\"DefaultFormatBundle\" for batch inference. It is '\n                'recommended to manually replace it in the test '\n                'data pipeline in your config file.', UserWarning)\n            pipelines[i] = {'type': 'DefaultFormatBundle'}\n    return pipelines\n\n\ndef get_loading_pipeline(pipeline):\n    \"\"\"Only keep loading image and annotations related configuration.\n\n    Args:\n        pipeline (list[dict]): Data pipeline configs.\n\n    Returns:\n        list[dict]: The new pipeline list with only keep\n            loading image and annotations related configuration.\n\n    Examples:\n        >>> pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(type='LoadAnnotations', with_bbox=True),\n        ...    
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n        ...    dict(type='RandomFlip', flip_ratio=0.5),\n        ...    dict(type='Normalize', **img_norm_cfg),\n        ...    dict(type='Pad', size_divisor=32),\n        ...    dict(type='DefaultFormatBundle'),\n        ...    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])\n        ...    ]\n        >>> expected_pipelines = [\n        ...    dict(type='LoadImageFromFile'),\n        ...    dict(type='LoadAnnotations', with_bbox=True)\n        ...    ]\n        >>> assert expected_pipelines ==\\\n        ...        get_loading_pipeline(pipelines)\n    \"\"\"\n    loading_pipeline_cfg = []\n    for cfg in pipeline:\n        obj_cls = PIPELINES.get(cfg['type'])\n        # TODO：use more elegant way to distinguish loading modules\n        if obj_cls is not None and obj_cls in (LoadImageFromFile,\n                                               LoadAnnotations,\n                                               LoadPanopticAnnotations):\n            loading_pipeline_cfg.append(cfg)\n    assert len(loading_pipeline_cfg) == 2, \\\n        'The data pipeline in your config file must include ' \\\n        'loading image and annotations related pipeline.'\n    return loading_pipeline_cfg\n\n\n@HOOKS.register_module()\nclass NumClassCheckHook(Hook):\n\n    def _check_head(self, runner):\n        \"\"\"Check whether the `num_classes` in head matches the length of\n        `CLASSES` in `dataset`.\n\n        Args:\n            runner (obj:`EpochBasedRunner`): Epoch based Runner.\n        \"\"\"\n        model = runner.model\n        dataset = runner.data_loader.dataset\n        if dataset.CLASSES is None:\n            runner.logger.warning(\n                f'Please set `CLASSES` '\n                f'in the {dataset.__class__.__name__} and'\n                f'check if it is consistent with the `num_classes` '\n                f'of head')\n        else:\n            assert type(dataset.CLASSES) is not str, \\\n                (f'`CLASSES` in {dataset.__class__.__name__}'\n                 f'should be a tuple of str.'\n                 f'Add comma if number of classes is 1 as '\n                 f'CLASSES = ({dataset.CLASSES},)')\n            for name, module in model.named_modules():\n                if hasattr(module, 'num_classes') and not isinstance(\n                        module, (RPNHead, VGG, FusedSemanticHead, GARPNHead)):\n                    assert module.num_classes == len(dataset.CLASSES), \\\n                        (f'The `num_classes` ({module.num_classes}) in '\n                         f'{module.__class__.__name__} of '\n                         f'{model.__class__.__name__} does not matches '\n                         f'the length of `CLASSES` '\n                         f'{len(dataset.CLASSES)}) in '\n                         f'{dataset.__class__.__name__}')\n\n    def before_train_epoch(self, runner):\n        \"\"\"Check whether the training dataset is compatible with head.\n\n        Args:\n            runner (obj:`EpochBasedRunner`): Epoch based Runner.\n        \"\"\"\n        self._check_head(runner)\n\n    def before_val_epoch(self, runner):\n        \"\"\"Check whether the dataset in val epoch is compatible with head.\n\n        Args:\n            runner (obj:`EpochBasedRunner`): Epoch based Runner.\n        \"\"\"\n        self._check_head(runner)\n"
  },
  {
    "path": "mmdet/datasets/voc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections import OrderedDict\n\nfrom mmcv.utils import print_log\n\nfrom mmdet.core import eval_map, eval_recalls\nfrom .builder import DATASETS\nfrom .xml_style import XMLDataset\n\n\n@DATASETS.register_module()\nclass VOCDataset(XMLDataset):\n\n    CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',\n               'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse',\n               'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',\n               'tvmonitor')\n\n    PALETTE = [(106, 0, 228), (119, 11, 32), (165, 42, 42), (0, 0, 192),\n               (197, 226, 255), (0, 60, 100), (0, 0, 142), (255, 77, 255),\n               (153, 69, 1), (120, 166, 157), (0, 182, 199), (0, 226, 252),\n               (182, 182, 255), (0, 0, 230), (220, 20, 60), (163, 255, 0),\n               (0, 82, 0), (3, 95, 161), (0, 80, 100), (183, 130, 88)]\n\n    def __init__(self, **kwargs):\n        super(VOCDataset, self).__init__(**kwargs)\n        if 'VOC2007' in self.img_prefix:\n            self.year = 2007\n        elif 'VOC2012' in self.img_prefix:\n            self.year = 2012\n        else:\n            raise ValueError('Cannot infer dataset year from img_prefix')\n\n    def evaluate(self,\n                 results,\n                 metric='mAP',\n                 logger=None,\n                 proposal_nums=(100, 300, 1000),\n                 iou_thr=0.5,\n                 scale_ranges=None):\n        \"\"\"Evaluate in VOC protocol.\n\n        Args:\n            results (list[list | tuple]): Testing results of the dataset.\n            metric (str | list[str]): Metrics to be evaluated. Options are\n                'mAP', 'recall'.\n            logger (logging.Logger | str, optional): Logger used for printing\n                related information during evaluation. Default: None.\n            proposal_nums (Sequence[int]): Proposal number used for evaluating\n                recalls, such as recall@100, recall@1000.\n                Default: (100, 300, 1000).\n            iou_thr (float | list[float]): IoU threshold. Default: 0.5.\n            scale_ranges (list[tuple], optional): Scale ranges for evaluating\n                mAP. If not specified, all bounding boxes would be included in\n                evaluation. 
Default: None.\n\n        Returns:\n            dict[str, float]: AP/recall metrics.\n        \"\"\"\n\n        if not isinstance(metric, str):\n            assert len(metric) == 1\n            metric = metric[0]\n        allowed_metrics = ['mAP', 'recall']\n        if metric not in allowed_metrics:\n            raise KeyError(f'metric {metric} is not supported')\n        annotations = [self.get_ann_info(i) for i in range(len(self))]\n        eval_results = OrderedDict()\n        iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr\n        if metric == 'mAP':\n            assert isinstance(iou_thrs, list)\n            if self.year == 2007:\n                ds_name = 'voc07'\n            else:\n                ds_name = self.CLASSES\n            mean_aps = []\n            for iou_thr in iou_thrs:\n                print_log(f'\\n{\"-\" * 15}iou_thr: {iou_thr}{\"-\" * 15}')\n                # Follow the official implementation,\n                # http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar\n                # we should use the legacy coordinate system in mmdet 1.x,\n                # which means w, h should be computed as 'x2 - x1 + 1` and\n                # `y2 - y1 + 1`\n                mean_ap, _ = eval_map(\n                    results,\n                    annotations,\n                    scale_ranges=None,\n                    iou_thr=iou_thr,\n                    dataset=ds_name,\n                    logger=logger,\n                    use_legacy_coordinate=True)\n                mean_aps.append(mean_ap)\n                eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)\n            eval_results['mAP'] = sum(mean_aps) / len(mean_aps)\n            eval_results.move_to_end('mAP', last=False)\n        elif metric == 'recall':\n            gt_bboxes = [ann['bboxes'] for ann in annotations]\n            recalls = eval_recalls(\n                gt_bboxes,\n                results,\n                proposal_nums,\n                iou_thrs,\n                logger=logger,\n                use_legacy_coordinate=True)\n            for i, num in enumerate(proposal_nums):\n                for j, iou_thr in enumerate(iou_thrs):\n                    eval_results[f'recall@{num}@{iou_thr}'] = recalls[i, j]\n            if recalls.shape[1] > 1:\n                ar = recalls.mean(axis=1)\n                for i, num in enumerate(proposal_nums):\n                    eval_results[f'AR@{num}'] = ar[i]\n        return eval_results\n"
  },
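For metric='mAP' the evaluation loop above produces one APxx entry per IoU threshold plus their average under 'mAP', which is moved to the front of the dict. A short sketch of that bookkeeping, with assumed per-threshold values standing in for real eval_map output:

from collections import OrderedDict

iou_thrs = [0.5, 0.75]       # thresholds passed as iou_thr
mean_aps = [0.712, 0.534]    # assumed outputs of eval_map per threshold

eval_results = OrderedDict()
for iou_thr, mean_ap in zip(iou_thrs, mean_aps):
    # Same key scheme as above: AP50, AP75, ...
    eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3)
eval_results['mAP'] = sum(mean_aps) / len(mean_aps)
eval_results.move_to_end('mAP', last=False)

print(eval_results)
# OrderedDict([('mAP', 0.623), ('AP50', 0.712), ('AP75', 0.534)])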
  {
    "path": "mmdet/datasets/wider_face.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\n\nfrom .builder import DATASETS\nfrom .xml_style import XMLDataset\n\n\n@DATASETS.register_module()\nclass WIDERFaceDataset(XMLDataset):\n    \"\"\"Reader for the WIDER Face dataset in PASCAL VOC format.\n\n    Conversion scripts can be found in\n    https://github.com/sovrasov/wider-face-pascal-voc-annotations\n    \"\"\"\n    CLASSES = ('face', )\n\n    PALETTE = [(0, 255, 0)]\n\n    def __init__(self, **kwargs):\n        super(WIDERFaceDataset, self).__init__(**kwargs)\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from WIDERFace XML style annotation file.\n\n        Args:\n            ann_file (str): Path of XML file.\n\n        Returns:\n            list[dict]: Annotation info from XML file.\n        \"\"\"\n\n        data_infos = []\n        img_ids = mmcv.list_from_file(ann_file)\n        for img_id in img_ids:\n            filename = f'{img_id}.jpg'\n            xml_path = osp.join(self.img_prefix, 'Annotations',\n                                f'{img_id}.xml')\n            tree = ET.parse(xml_path)\n            root = tree.getroot()\n            size = root.find('size')\n            width = int(size.find('width').text)\n            height = int(size.find('height').text)\n            folder = root.find('folder').text\n            data_infos.append(\n                dict(\n                    id=img_id,\n                    filename=osp.join(folder, filename),\n                    width=width,\n                    height=height))\n\n        return data_infos\n"
  },
  {
    "path": "mmdet/datasets/xml_style.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\nimport numpy as np\nfrom PIL import Image\n\nfrom .builder import DATASETS\nfrom .custom import CustomDataset\n\n\n@DATASETS.register_module()\nclass XMLDataset(CustomDataset):\n    \"\"\"XML dataset for detection.\n\n    Args:\n        min_size (int | float, optional): The minimum size of bounding\n            boxes in the images. If the size of a bounding box is less than\n            ``min_size``, it would be add to ignored field.\n        img_subdir (str): Subdir where images are stored. Default: JPEGImages.\n        ann_subdir (str): Subdir where annotations are. Default: Annotations.\n    \"\"\"\n\n    def __init__(self,\n                 min_size=None,\n                 img_subdir='JPEGImages',\n                 ann_subdir='Annotations',\n                 **kwargs):\n        assert self.CLASSES or kwargs.get(\n            'classes', None), 'CLASSES in `XMLDataset` can not be None.'\n        self.img_subdir = img_subdir\n        self.ann_subdir = ann_subdir\n        super(XMLDataset, self).__init__(**kwargs)\n        self.cat2label = {cat: i for i, cat in enumerate(self.CLASSES)}\n        self.min_size = min_size\n\n    def load_annotations(self, ann_file):\n        \"\"\"Load annotation from XML style ann_file.\n\n        Args:\n            ann_file (str): Path of XML file.\n\n        Returns:\n            list[dict]: Annotation info from XML file.\n        \"\"\"\n\n        data_infos = []\n        img_ids = mmcv.list_from_file(ann_file)\n        for img_id in img_ids:\n            filename = osp.join(self.img_subdir, f'{img_id}.jpg')\n            xml_path = osp.join(self.img_prefix, self.ann_subdir,\n                                f'{img_id}.xml')\n            tree = ET.parse(xml_path)\n            root = tree.getroot()\n            size = root.find('size')\n            if size is not None:\n                width = int(size.find('width').text)\n                height = int(size.find('height').text)\n            else:\n                img_path = osp.join(self.img_prefix, filename)\n                img = Image.open(img_path)\n                width, height = img.size\n            data_infos.append(\n                dict(id=img_id, filename=filename, width=width, height=height))\n\n        return data_infos\n\n    def _filter_imgs(self, min_size=32):\n        \"\"\"Filter images too small or without annotation.\"\"\"\n        valid_inds = []\n        for i, img_info in enumerate(self.data_infos):\n            if min(img_info['width'], img_info['height']) < min_size:\n                continue\n            if self.filter_empty_gt:\n                img_id = img_info['id']\n                xml_path = osp.join(self.img_prefix, self.ann_subdir,\n                                    f'{img_id}.xml')\n                tree = ET.parse(xml_path)\n                root = tree.getroot()\n                for obj in root.findall('object'):\n                    name = obj.find('name').text\n                    if name in self.CLASSES:\n                        valid_inds.append(i)\n                        break\n            else:\n                valid_inds.append(i)\n        return valid_inds\n\n    def get_ann_info(self, idx):\n        \"\"\"Get annotation from XML file by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            dict: Annotation info of specified index.\n        \"\"\"\n\n        img_id = 
self.data_infos[idx]['id']\n        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')\n        tree = ET.parse(xml_path)\n        root = tree.getroot()\n        bboxes = []\n        labels = []\n        bboxes_ignore = []\n        labels_ignore = []\n        for obj in root.findall('object'):\n            name = obj.find('name').text\n            if name not in self.CLASSES:\n                continue\n            label = self.cat2label[name]\n            difficult = obj.find('difficult')\n            difficult = 0 if difficult is None else int(difficult.text)\n            bnd_box = obj.find('bndbox')\n            # TODO: check whether it is necessary to use int\n            # Coordinates may be float type\n            bbox = [\n                int(float(bnd_box.find('xmin').text)),\n                int(float(bnd_box.find('ymin').text)),\n                int(float(bnd_box.find('xmax').text)),\n                int(float(bnd_box.find('ymax').text))\n            ]\n            ignore = False\n            if self.min_size:\n                assert not self.test_mode\n                w = bbox[2] - bbox[0]\n                h = bbox[3] - bbox[1]\n                if w < self.min_size or h < self.min_size:\n                    ignore = True\n            if difficult or ignore:\n                bboxes_ignore.append(bbox)\n                labels_ignore.append(label)\n            else:\n                bboxes.append(bbox)\n                labels.append(label)\n        if not bboxes:\n            bboxes = np.zeros((0, 4))\n            labels = np.zeros((0, ))\n        else:\n            bboxes = np.array(bboxes, ndmin=2) - 1\n            labels = np.array(labels)\n        if not bboxes_ignore:\n            bboxes_ignore = np.zeros((0, 4))\n            labels_ignore = np.zeros((0, ))\n        else:\n            bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1\n            labels_ignore = np.array(labels_ignore)\n        ann = dict(\n            bboxes=bboxes.astype(np.float32),\n            labels=labels.astype(np.int64),\n            bboxes_ignore=bboxes_ignore.astype(np.float32),\n            labels_ignore=labels_ignore.astype(np.int64))\n        return ann\n\n    def get_cat_ids(self, idx):\n        \"\"\"Get category ids in XML file by index.\n\n        Args:\n            idx (int): Index of data.\n\n        Returns:\n            list[int]: All categories in the image of specified index.\n        \"\"\"\n\n        cat_ids = []\n        img_id = self.data_infos[idx]['id']\n        xml_path = osp.join(self.img_prefix, self.ann_subdir, f'{img_id}.xml')\n        tree = ET.parse(xml_path)\n        root = tree.getroot()\n        for obj in root.findall('object'):\n            name = obj.find('name').text\n            if name not in self.CLASSES:\n                continue\n            label = self.cat2label[name]\n            cat_ids.append(label)\n\n        return cat_ids\n"
  },
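get_ann_info walks the object nodes of a VOC-style XML file, keeps known class names and converts each bndbox with int(float(...)). A self-contained sketch of that parsing against an inline XML string (a hypothetical single-object annotation and class mapping):

import xml.etree.ElementTree as ET

import numpy as np

xml_str = '''
<annotation>
  <object>
    <name>dog</name>
    <difficult>0</difficult>
    <bndbox><xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax></bndbox>
  </object>
</annotation>
'''
cat2label = {'dog': 0}  # assumed class mapping

root = ET.fromstring(xml_str)
bboxes, labels = [], []
for obj in root.findall('object'):
    name = obj.find('name').text
    if name not in cat2label:
        continue
    bnd_box = obj.find('bndbox')
    # Same int(float(...)) conversion as get_ann_info above.
    bboxes.append([
        int(float(bnd_box.find(tag).text))
        for tag in ('xmin', 'ymin', 'xmax', 'ymax')
    ])
    labels.append(cat2label[name])

print(np.array(bboxes, ndmin=2) - 1, labels)  # 1-based coords shifted to 0-based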
  {
    "path": "mmdet/models/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .backbones import *  # noqa: F401,F403\nfrom .builder import (BACKBONES, DETECTORS, HEADS, LOSSES, NECKS,\n                      ROI_EXTRACTORS, SHARED_HEADS, build_backbone,\n                      build_detector, build_head, build_loss, build_neck,\n                      build_roi_extractor, build_shared_head)\nfrom .dense_heads import *  # noqa: F401,F403\nfrom .detectors import *  # noqa: F401,F403\nfrom .losses import *  # noqa: F401,F403\nfrom .necks import *  # noqa: F401,F403\nfrom .plugins import *  # noqa: F401,F403\nfrom .roi_heads import *  # noqa: F401,F403\nfrom .seg_heads import *  # noqa: F401,F403\n\n__all__ = [\n    'BACKBONES', 'NECKS', 'ROI_EXTRACTORS', 'SHARED_HEADS', 'HEADS', 'LOSSES',\n    'DETECTORS', 'build_backbone', 'build_neck', 'build_roi_extractor',\n    'build_shared_head', 'build_head', 'build_loss', 'build_detector'\n]\n"
  },
  {
    "path": "mmdet/models/backbones/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .csp_darknet import CSPDarknet\nfrom .darknet import Darknet\nfrom .detectors_resnet import DetectoRS_ResNet\nfrom .detectors_resnext import DetectoRS_ResNeXt\nfrom .efficientnet import EfficientNet\nfrom .hourglass import HourglassNet\nfrom .hrnet import HRNet\nfrom .mobilenet_v2 import MobileNetV2\nfrom .pvt import PyramidVisionTransformer, PyramidVisionTransformerV2\nfrom .regnet import RegNet\nfrom .res2net import Res2Net\nfrom .resnest import ResNeSt\nfrom .resnet import ResNet, ResNetV1d\nfrom .resnext import ResNeXt\nfrom .ssd_vgg import SSDVGG\nfrom .swin import SwinTransformer\nfrom .trident_resnet import TridentResNet\n\n__all__ = [\n    'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet',\n    'MobileNetV2', 'Res2Net', 'HourglassNet', 'DetectoRS_ResNet',\n    'DetectoRS_ResNeXt', 'Darknet', 'ResNeSt', 'TridentResNet', 'CSPDarknet',\n    'SwinTransformer', 'PyramidVisionTransformer',\n    'PyramidVisionTransformerV2', 'EfficientNet'\n]\n"
  },
  {
    "path": "mmdet/models/backbones/csp_darknet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import CSPLayer\n\n\nclass Focus(nn.Module):\n    \"\"\"Focus width and height information into channel space.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        kernel_size (int): The kernel size of the convolution. Default: 1\n        stride (int): The stride of the convolution. Default: 1\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN', momentum=0.03, eps=0.001).\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish').\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=1,\n                 stride=1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish')):\n        super().__init__()\n        self.conv = ConvModule(\n            in_channels * 4,\n            out_channels,\n            kernel_size,\n            stride,\n            padding=(kernel_size - 1) // 2,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def forward(self, x):\n        # shape of x (b,c,w,h) -> y(b,4c,w/2,h/2)\n        patch_top_left = x[..., ::2, ::2]\n        patch_top_right = x[..., ::2, 1::2]\n        patch_bot_left = x[..., 1::2, ::2]\n        patch_bot_right = x[..., 1::2, 1::2]\n        x = torch.cat(\n            (\n                patch_top_left,\n                patch_bot_left,\n                patch_top_right,\n                patch_bot_right,\n            ),\n            dim=1,\n        )\n        return self.conv(x)\n\n\nclass SPPBottleneck(BaseModule):\n    \"\"\"Spatial pyramid pooling layer used in YOLOv3-SPP.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        kernel_sizes (tuple[int]): Sequential of kernel sizes of pooling\n            layers. Default: (5, 9, 13).\n        conv_cfg (dict): Config dict for convolution layer. 
Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish').\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_sizes=(5, 9, 13),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n        mid_channels = in_channels // 2\n        self.conv1 = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.poolings = nn.ModuleList([\n            nn.MaxPool2d(kernel_size=ks, stride=1, padding=ks // 2)\n            for ks in kernel_sizes\n        ])\n        conv2_channels = mid_channels * (len(kernel_sizes) + 1)\n        self.conv2 = ConvModule(\n            conv2_channels,\n            out_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = torch.cat([x] + [pooling(x) for pooling in self.poolings], dim=1)\n        x = self.conv2(x)\n        return x\n\n\n@BACKBONES.register_module()\nclass CSPDarknet(BaseModule):\n    \"\"\"CSP-Darknet backbone used in YOLOv5 and YOLOX.\n\n    Args:\n        arch (str): Architecture of CSP-Darknet, from {P5, P6}.\n            Default: P5.\n        deepen_factor (float): Depth multiplier, multiply number of\n            blocks in CSP layer by this amount. Default: 1.0.\n        widen_factor (float): Width multiplier, multiply number of\n            channels in each layer by this amount. Default: 1.0.\n        out_indices (Sequence[int]): Output from which stages.\n            Default: (2, 3, 4).\n        frozen_stages (int): Stages to be frozen (stop grad and set eval\n            mode). -1 means not freezing any parameters. Default: -1.\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Default: False.\n        arch_ovewrite(list): Overwrite default arch settings. Default: None.\n        spp_kernal_sizes: (tuple[int]): Sequential of kernel sizes of SPP\n            layers. Default: (5, 9, 13).\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True).\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). 
Note: Effect on Batch Norm\n            and its variants only.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    Example:\n        >>> from mmdet.models import CSPDarknet\n        >>> import torch\n        >>> self = CSPDarknet(depth=53)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 416, 416)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        ...\n        (1, 256, 52, 52)\n        (1, 512, 26, 26)\n        (1, 1024, 13, 13)\n    \"\"\"\n    # From left to right:\n    # in_channels, out_channels, num_blocks, add_identity, use_spp\n    arch_settings = {\n        'P5': [[64, 128, 3, True, False], [128, 256, 9, True, False],\n               [256, 512, 9, True, False], [512, 1024, 3, False, True]],\n        'P6': [[64, 128, 3, True, False], [128, 256, 9, True, False],\n               [256, 512, 9, True, False], [512, 768, 3, True, False],\n               [768, 1024, 3, False, True]]\n    }\n\n    def __init__(self,\n                 arch='P5',\n                 deepen_factor=1.0,\n                 widen_factor=1.0,\n                 out_indices=(2, 3, 4),\n                 frozen_stages=-1,\n                 use_depthwise=False,\n                 arch_ovewrite=None,\n                 spp_kernal_sizes=(5, 9, 13),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 norm_eval=False,\n                 init_cfg=dict(\n                     type='Kaiming',\n                     layer='Conv2d',\n                     a=math.sqrt(5),\n                     distribution='uniform',\n                     mode='fan_in',\n                     nonlinearity='leaky_relu')):\n        super().__init__(init_cfg)\n        arch_setting = self.arch_settings[arch]\n        if arch_ovewrite:\n            arch_setting = arch_ovewrite\n        assert set(out_indices).issubset(\n            i for i in range(len(arch_setting) + 1))\n        if frozen_stages not in range(-1, len(arch_setting) + 1):\n            raise ValueError('frozen_stages must be in range(-1, '\n                             'len(arch_setting) + 1). 
But received '\n                             f'{frozen_stages}')\n\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.use_depthwise = use_depthwise\n        self.norm_eval = norm_eval\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n\n        self.stem = Focus(\n            3,\n            int(arch_setting[0][0] * widen_factor),\n            kernel_size=3,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.layers = ['stem']\n\n        for i, (in_channels, out_channels, num_blocks, add_identity,\n                use_spp) in enumerate(arch_setting):\n            in_channels = int(in_channels * widen_factor)\n            out_channels = int(out_channels * widen_factor)\n            num_blocks = max(round(num_blocks * deepen_factor), 1)\n            stage = []\n            conv_layer = conv(\n                in_channels,\n                out_channels,\n                3,\n                stride=2,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(conv_layer)\n            if use_spp:\n                spp = SPPBottleneck(\n                    out_channels,\n                    out_channels,\n                    kernel_sizes=spp_kernal_sizes,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg)\n                stage.append(spp)\n            csp_layer = CSPLayer(\n                out_channels,\n                out_channels,\n                num_blocks=num_blocks,\n                add_identity=add_identity,\n                use_depthwise=use_depthwise,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            stage.append(csp_layer)\n            self.add_module(f'stage{i + 1}', nn.Sequential(*stage))\n            self.layers.append(f'stage{i + 1}')\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for i in range(self.frozen_stages + 1):\n                m = getattr(self, self.layers[i])\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def train(self, mode=True):\n        super(CSPDarknet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n    def forward(self, x):\n        outs = []\n        for i, layer_name in enumerate(self.layers):\n            layer = getattr(self, layer_name)\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/darknet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\n\n\nclass ResBlock(BaseModule):\n    \"\"\"The basic residual block used in Darknet. Each ResBlock consists of two\n    ConvModules and the input is added to the final output. Each ConvModule is\n    composed of Conv, BN, and LeakyReLU. In YoloV3 paper, the first convLayer\n    has half of the number of the filters as much as the second convLayer. The\n    first convLayer has filter size of 1x1 and the second one has the filter\n    size of 3x3.\n\n    Args:\n        in_channels (int): The input channels. Must be even.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 init_cfg=None):\n        super(ResBlock, self).__init__(init_cfg)\n        assert in_channels % 2 == 0  # ensure the in_channels is even\n        half_in_channels = in_channels // 2\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        self.conv1 = ConvModule(in_channels, half_in_channels, 1, **cfg)\n        self.conv2 = ConvModule(\n            half_in_channels, in_channels, 3, padding=1, **cfg)\n\n    def forward(self, x):\n        residual = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n        out = out + residual\n\n        return out\n\n\n@BACKBONES.register_module()\nclass Darknet(BaseModule):\n    \"\"\"Darknet backbone.\n\n    Args:\n        depth (int): Depth of Darknet. Currently only support 53.\n        out_indices (Sequence[int]): Output from which stages.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters. Default: -1.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import Darknet\n        >>> import torch\n        >>> self = Darknet(depth=53)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 416, 416)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...  
   print(tuple(level_out.shape))\n        ...\n        (1, 256, 52, 52)\n        (1, 512, 26, 26)\n        (1, 1024, 13, 13)\n    \"\"\"\n\n    # Dict(depth: (layers, channels))\n    arch_settings = {\n        53: ((1, 2, 8, 8, 4), ((32, 64), (64, 128), (128, 256), (256, 512),\n                               (512, 1024)))\n    }\n\n    def __init__(self,\n                 depth=53,\n                 out_indices=(3, 4, 5),\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 norm_eval=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(Darknet, self).__init__(init_cfg)\n        if depth not in self.arch_settings:\n            raise KeyError(f'invalid depth {depth} for darknet')\n\n        self.depth = depth\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.layers, self.channels = self.arch_settings[depth]\n\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        self.conv1 = ConvModule(3, 32, 3, padding=1, **cfg)\n\n        self.cr_blocks = ['conv1']\n        for i, n_layers in enumerate(self.layers):\n            layer_name = f'conv_res_block{i + 1}'\n            in_c, out_c = self.channels[i]\n            self.add_module(\n                layer_name,\n                self.make_conv_res_block(in_c, out_c, n_layers, **cfg))\n            self.cr_blocks.append(layer_name)\n\n        self.norm_eval = norm_eval\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def forward(self, x):\n        outs = []\n        for i, layer_name in enumerate(self.cr_blocks):\n            cr_block = getattr(self, layer_name)\n            x = cr_block(x)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return tuple(outs)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for i in range(self.frozen_stages):\n                m = getattr(self, self.cr_blocks[i])\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def train(self, mode=True):\n        super(Darknet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n    @staticmethod\n    def make_conv_res_block(in_channels,\n                            out_channels,\n                            res_repeat,\n                            conv_cfg=None,\n                            norm_cfg=dict(type='BN', requires_grad=True),\n                            
act_cfg=dict(type='LeakyReLU',\n                                         negative_slope=0.1)):\n        \"\"\"In the Darknet backbone, a conv layer is usually followed by a\n        group of ResBlocks; this function builds such a conv-res block. The\n        conv layer always uses a 3x3 kernel with stride=2, and its number of\n        filters is the same as the out channels of the ResBlocks.\n\n        Args:\n            in_channels (int): The number of input channels.\n            out_channels (int): The number of output channels.\n            res_repeat (int): The number of ResBlocks.\n            conv_cfg (dict): Config dict for convolution layer. Default: None.\n            norm_cfg (dict): Dictionary to construct and config norm layer.\n                Default: dict(type='BN', requires_grad=True)\n            act_cfg (dict): Config dict for activation layer.\n                Default: dict(type='LeakyReLU', negative_slope=0.1).\n        \"\"\"\n\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        model = nn.Sequential()\n        model.add_module(\n            'conv',\n            ConvModule(\n                in_channels, out_channels, 3, stride=2, padding=1, **cfg))\n        for idx in range(res_repeat):\n            model.add_module('res{}'.format(idx),\n                             ResBlock(out_channels, **cfg))\n        return model\n"
  },
  {
    "path": "mmdet/models/backbones/detectors_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,\n                      kaiming_init)\nfrom mmcv.runner import Sequential, load_checkpoint\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom mmdet.utils import get_root_logger\nfrom ..builder import BACKBONES\nfrom .resnet import BasicBlock\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    r\"\"\"Bottleneck for the ResNet backbone in `DetectoRS\n    <https://arxiv.org/pdf/2006.02334.pdf>`_.\n\n    This bottleneck allows the users to specify whether to use\n    SAC (Switchable Atrous Convolution) and RFP (Recursive Feature Pyramid).\n\n    Args:\n         inplanes (int): The number of input channels.\n         planes (int): The number of output channels before expansion.\n         rfp_inplanes (int, optional): The number of channels from RFP.\n             Default: None. If specified, an additional conv layer will be\n             added for ``rfp_feat``. Otherwise, the structure is the same as\n             base class.\n         sac (dict, optional): Dictionary to construct SAC. Default: None.\n         init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 rfp_inplanes=None,\n                 sac=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(Bottleneck, self).__init__(\n            inplanes, planes, init_cfg=init_cfg, **kwargs)\n\n        assert sac is None or isinstance(sac, dict)\n        self.sac = sac\n        self.with_sac = sac is not None\n        if self.with_sac:\n            self.conv2 = build_conv_layer(\n                self.sac,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                bias=False)\n\n        self.rfp_inplanes = rfp_inplanes\n        if self.rfp_inplanes:\n            self.rfp_conv = build_conv_layer(\n                None,\n                self.rfp_inplanes,\n                planes * self.expansion,\n                1,\n                stride=1,\n                bias=True)\n            if init_cfg is None:\n                self.init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='rfp_conv'))\n\n    def rfp_forward(self, x, rfp_feat):\n        \"\"\"The forward function that also takes the RFP features as input.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            
out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        if self.rfp_inplanes:\n            rfp_feat = self.rfp_conv(rfp_feat)\n            out = out + rfp_feat\n\n        out = self.relu(out)\n\n        return out\n\n\nclass ResLayer(Sequential):\n    \"\"\"ResLayer to build ResNet style backbone for RFP in DetectoRS.\n\n    The difference between this module and base class is that we pass\n    ``rfp_inplanes`` to the first block.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Default: 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck. Default: False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='BN')\n        downsample_first (bool): Downsample at the first block or last block.\n            False for Hourglass, True for ResNet. Default: True\n        rfp_inplanes (int, optional): The number of channels from RFP.\n            Default: None. If specified, an additional conv layer will be\n            added for ``rfp_feat``. Otherwise, the structure is the same as\n            base class.\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 inplanes,\n                 planes,\n                 num_blocks,\n                 stride=1,\n                 avg_down=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 downsample_first=True,\n                 rfp_inplanes=None,\n                 **kwargs):\n        self.block = block\n        assert downsample_first, f'downsample_first={downsample_first} is ' \\\n                                 'not supported in DetectoRS'\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = []\n            conv_stride = stride\n            if avg_down and stride != 1:\n                conv_stride = 1\n                downsample.append(\n                    nn.AvgPool2d(\n                        kernel_size=stride,\n                        stride=stride,\n                        ceil_mode=True,\n                        count_include_pad=False))\n            downsample.extend([\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=conv_stride,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1]\n            ])\n            downsample = nn.Sequential(*downsample)\n\n        layers = []\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride,\n                downsample=downsample,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                rfp_inplanes=rfp_inplanes,\n                **kwargs))\n        inplanes = planes * block.expansion\n        for _ in range(1, num_blocks):\n            layers.append(\n                block(\n                 
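   # later blocks keep stride 1 and receive no rfp_inplanes,\n                    # so only the first block fuses the RFP feature\n                 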
   inplanes=inplanes,\n                    planes=planes,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n\n        super(ResLayer, self).__init__(*layers)\n\n\n@BACKBONES.register_module()\nclass DetectoRS_ResNet(ResNet):\n    \"\"\"ResNet backbone for DetectoRS.\n\n    Args:\n        sac (dict, optional): Dictionary to construct SAC (Switchable Atrous\n            Convolution). Default: None.\n        stage_with_sac (list): Which stage to use sac. Default: (False, False,\n            False, False).\n        rfp_inplanes (int, optional): The number of channels from RFP.\n            Default: None. If specified, an additional conv layer will be\n            added for ``rfp_feat``. Otherwise, the structure is the same as\n            base class.\n        output_img (bool): If ``True``, the input image will be inserted into\n            the starting position of output. Default: False.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 sac=None,\n                 stage_with_sac=(False, False, False, False),\n                 rfp_inplanes=None,\n                 output_img=False,\n                 pretrained=None,\n                 init_cfg=None,\n                 **kwargs):\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        self.pretrained = pretrained\n        if init_cfg is not None:\n            assert isinstance(init_cfg, dict), \\\n                f'init_cfg must be a dict, but got {type(init_cfg)}'\n            if 'type' in init_cfg:\n                assert init_cfg.get('type') == 'Pretrained', \\\n                    'Only can initialize module by loading a pretrained model'\n            else:\n                raise KeyError('`init_cfg` must contain the key \"type\"')\n            self.pretrained = init_cfg.get('checkpoint')\n        self.sac = sac\n        self.stage_with_sac = stage_with_sac\n        self.rfp_inplanes = rfp_inplanes\n        self.output_img = output_img\n        super(DetectoRS_ResNet, self).__init__(**kwargs)\n\n        self.inplanes = self.stem_channels\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = self.strides[i]\n            dilation = self.dilations[i]\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            sac = self.sac if self.stage_with_sac[i] else None\n            if self.plugins is not None:\n                stage_plugins = self.make_stage_plugins(self.plugins, i)\n            else:\n                stage_plugins = None\n            planes = self.base_channels * 2**i\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=planes,\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=self.with_cp,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                dcn=dcn,\n                sac=sac,\n                rfp_inplanes=rfp_inplanes if i > 0 else None,\n                plugins=stage_plugins)\n            self.inplanes = planes * self.block.expansion\n      
      layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n    # In order to be properly initialized by RFP\n    def init_weights(self):\n        # Calling this method will cause parameter initialization exception\n        # super(DetectoRS_ResNet, self).init_weights()\n\n        if isinstance(self.pretrained, str):\n            logger = get_root_logger()\n            load_checkpoint(self, self.pretrained, strict=False, logger=logger)\n        elif self.pretrained is None:\n            for m in self.modules():\n                if isinstance(m, nn.Conv2d):\n                    kaiming_init(m)\n                elif isinstance(m, (_BatchNorm, nn.GroupNorm)):\n                    constant_init(m, 1)\n\n            if self.dcn is not None:\n                for m in self.modules():\n                    if isinstance(m, Bottleneck) and hasattr(\n                            m.conv2, 'conv_offset'):\n                        constant_init(m.conv2.conv_offset, 0)\n\n            if self.zero_init_residual:\n                for m in self.modules():\n                    if isinstance(m, Bottleneck):\n                        constant_init(m.norm3, 0)\n                    elif isinstance(m, BasicBlock):\n                        constant_init(m.norm2, 0)\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer`` for DetectoRS.\"\"\"\n        return ResLayer(**kwargs)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = list(super(DetectoRS_ResNet, self).forward(x))\n        if self.output_img:\n            outs.insert(0, x)\n        return tuple(outs)\n\n    def rfp_forward(self, x, rfp_feats):\n        \"\"\"Forward function for RFP.\"\"\"\n        if self.deep_stem:\n            x = self.stem(x)\n        else:\n            x = self.conv1(x)\n            x = self.norm1(x)\n            x = self.relu(x)\n        x = self.maxpool(x)\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            rfp_feat = rfp_feats[i] if i > 0 else None\n            for layer in res_layer:\n                x = layer.rfp_forward(x, rfp_feat)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/detectors_resnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom ..builder import BACKBONES\nfrom .detectors_resnet import Bottleneck as _Bottleneck\nfrom .detectors_resnet import DetectoRS_ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeXt.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(\n            self.norm_cfg, width, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        self.with_modulated_dcn = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if self.with_sac:\n            self.conv2 = build_conv_layer(\n                self.sac,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        elif not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                self.conv_cfg,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                self.dcn,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n\n@BACKBONES.register_module()\nclass DetectoRS_ResNeXt(DetectoRS_ResNet):\n    \"\"\"ResNeXt backbone for DetectoRS.\n\n    Args:\n        groups (int): The number of groups in ResNeXt.\n        base_width (int): The base width of ResNeXt.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, 
(3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self, groups=1, base_width=4, **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        super(DetectoRS_ResNeXt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        return super().make_res_layer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/efficientnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport math\nfrom functools import partial\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn.bricks import ConvModule, DropPath\nfrom mmcv.runner import BaseModule, Sequential\n\nfrom ..builder import BACKBONES\nfrom ..utils import InvertedResidual, SELayer, make_divisible\n\n\nclass EdgeResidual(BaseModule):\n    \"\"\"Edge Residual Block.\n\n    Args:\n        in_channels (int): The input channels of this module.\n        out_channels (int): The output channels of this module.\n        mid_channels (int): The input channels of the second convolution.\n        kernel_size (int): The kernel size of the first convolution.\n            Defaults to 3.\n        stride (int): The stride of the first convolution. Defaults to 1.\n        se_cfg (dict, optional): Config dict for se layer. Defaults to None,\n            which means no se layer.\n        with_residual (bool): Use residual connection. Defaults to True.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to ``dict(type='BN')``.\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to ``dict(type='ReLU')``.\n        drop_path_rate (float): stochastic depth rate. Defaults to 0.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Defaults to False.\n        init_cfg (dict | list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 mid_channels,\n                 kernel_size=3,\n                 stride=1,\n                 se_cfg=None,\n                 with_residual=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU'),\n                 drop_path_rate=0.,\n                 with_cp=False,\n                 init_cfg=None,\n                 **kwargs):\n        super(EdgeResidual, self).__init__(init_cfg=init_cfg)\n        assert stride in [1, 2]\n        self.with_cp = with_cp\n        self.drop_path = DropPath(\n            drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n        self.with_se = se_cfg is not None\n        self.with_residual = (\n            stride == 1 and in_channels == out_channels and with_residual)\n\n        if self.with_se:\n            assert isinstance(se_cfg, dict)\n\n        self.conv1 = ConvModule(\n            in_channels=in_channels,\n            out_channels=mid_channels,\n            kernel_size=kernel_size,\n            stride=1,\n            padding=kernel_size // 2,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        if self.with_se:\n            self.se = SELayer(**se_cfg)\n\n        self.conv2 = ConvModule(\n            in_channels=mid_channels,\n            out_channels=out_channels,\n            kernel_size=1,\n            stride=stride,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            out = x\n            out = self.conv1(out)\n\n            if self.with_se:\n                out = self.se(out)\n\n            out = 
self.conv2(out)\n\n            if self.with_residual:\n                return x + self.drop_path(out)\n            else:\n                return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        return out\n\n\ndef model_scaling(layer_setting, arch_setting):\n    \"\"\"Scaling operation to the layer's parameters according to the\n    arch_setting.\"\"\"\n    # scale width\n    new_layer_setting = copy.deepcopy(layer_setting)\n    for layer_cfg in new_layer_setting:\n        for block_cfg in layer_cfg:\n            block_cfg[1] = make_divisible(block_cfg[1] * arch_setting[0], 8)\n\n    # scale depth\n    split_layer_setting = [new_layer_setting[0]]\n    for layer_cfg in new_layer_setting[1:-1]:\n        tmp_index = [0]\n        for i in range(len(layer_cfg) - 1):\n            if layer_cfg[i + 1][1] != layer_cfg[i][1]:\n                tmp_index.append(i + 1)\n        tmp_index.append(len(layer_cfg))\n        for i in range(len(tmp_index) - 1):\n            split_layer_setting.append(layer_cfg[tmp_index[i]:tmp_index[i +\n                                                                        1]])\n    split_layer_setting.append(new_layer_setting[-1])\n\n    num_of_layers = [len(layer_cfg) for layer_cfg in split_layer_setting[1:-1]]\n    new_layers = [\n        int(math.ceil(arch_setting[1] * num)) for num in num_of_layers\n    ]\n\n    merge_layer_setting = [split_layer_setting[0]]\n    for i, layer_cfg in enumerate(split_layer_setting[1:-1]):\n        if new_layers[i] <= num_of_layers[i]:\n            tmp_layer_cfg = layer_cfg[:new_layers[i]]\n        else:\n            tmp_layer_cfg = copy.deepcopy(layer_cfg) + [layer_cfg[-1]] * (\n                new_layers[i] - num_of_layers[i])\n        if tmp_layer_cfg[0][3] == 1 and i != 0:\n            merge_layer_setting[-1] += tmp_layer_cfg.copy()\n        else:\n            merge_layer_setting.append(tmp_layer_cfg.copy())\n    merge_layer_setting.append(split_layer_setting[-1])\n\n    return merge_layer_setting\n\n\n@BACKBONES.register_module()\nclass EfficientNet(BaseModule):\n    \"\"\"EfficientNet backbone.\n\n    Args:\n        arch (str): Architecture of efficientnet. Defaults to b0.\n        out_indices (Sequence[int]): Output from which stages.\n            Defaults to (6, ).\n        frozen_stages (int): Stages to be frozen (all param fixed).\n            Defaults to 0, which means not freezing any parameters.\n        conv_cfg (dict): Config dict for convolution layer.\n            Defaults to None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Defaults to dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Defaults to dict(type='Swish').\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Defaults to False.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. 
Defaults to False.\n    \"\"\"\n\n    # Parameters to build layers.\n    # 'b' represents the architecture of normal EfficientNet family includes\n    # 'b0', 'b1', 'b2', 'b3', 'b4', 'b5', 'b6', 'b7', 'b8'.\n    # 'e' represents the architecture of EfficientNet-EdgeTPU including 'es',\n    # 'em', 'el'.\n    # 6 parameters are needed to construct a layer, From left to right:\n    # - kernel_size: The kernel size of the block\n    # - out_channel: The number of out_channels of the block\n    # - se_ratio: The sequeeze ratio of SELayer.\n    # - stride: The stride of the block\n    # - expand_ratio: The expand_ratio of the mid_channels\n    # - block_type: -1: Not a block, 0: InvertedResidual, 1: EdgeResidual\n    layer_settings = {\n        'b': [[[3, 32, 0, 2, 0, -1]],\n              [[3, 16, 4, 1, 1, 0]],\n              [[3, 24, 4, 2, 6, 0],\n               [3, 24, 4, 1, 6, 0]],\n              [[5, 40, 4, 2, 6, 0],\n               [5, 40, 4, 1, 6, 0]],\n              [[3, 80, 4, 2, 6, 0],\n               [3, 80, 4, 1, 6, 0],\n               [3, 80, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0],\n               [5, 112, 4, 1, 6, 0]],\n              [[5, 192, 4, 2, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [5, 192, 4, 1, 6, 0],\n               [3, 320, 4, 1, 6, 0]],\n              [[1, 1280, 0, 1, 0, -1]]\n              ],\n        'e': [[[3, 32, 0, 2, 0, -1]],\n              [[3, 24, 0, 1, 3, 1]],\n              [[3, 32, 0, 2, 8, 1],\n               [3, 32, 0, 1, 8, 1]],\n              [[3, 48, 0, 2, 8, 1],\n               [3, 48, 0, 1, 8, 1],\n               [3, 48, 0, 1, 8, 1],\n               [3, 48, 0, 1, 8, 1]],\n              [[5, 96, 0, 2, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 96, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0],\n               [5, 144, 0, 1, 8, 0]],\n              [[5, 192, 0, 2, 8, 0],\n               [5, 192, 0, 1, 8, 0]],\n              [[1, 1280, 0, 1, 0, -1]]\n              ]\n    }  # yapf: disable\n\n    # Parameters to build different kinds of architecture.\n    # From left to right: scaling factor for width, scaling factor for depth,\n    # resolution.\n    arch_settings = {\n        'b0': (1.0, 1.0, 224),\n        'b1': (1.0, 1.1, 240),\n        'b2': (1.1, 1.2, 260),\n        'b3': (1.2, 1.4, 300),\n        'b4': (1.4, 1.8, 380),\n        'b5': (1.6, 2.2, 456),\n        'b6': (1.8, 2.6, 528),\n        'b7': (2.0, 3.1, 600),\n        'b8': (2.2, 3.6, 672),\n        'es': (1.0, 1.0, 224),\n        'em': (1.0, 1.1, 240),\n        'el': (1.2, 1.4, 300)\n    }\n\n    def __init__(self,\n                 arch='b0',\n                 drop_path_rate=0.,\n                 out_indices=(6, ),\n                 frozen_stages=0,\n                 conv_cfg=dict(type='Conv2dAdaptivePadding'),\n                 norm_cfg=dict(type='BN', eps=1e-3),\n                 act_cfg=dict(type='Swish'),\n                 norm_eval=False,\n                 with_cp=False,\n                 init_cfg=[\n                     dict(type='Kaiming', layer='Conv2d'),\n                     dict(\n                         type='Constant',\n                         layer=['_BatchNorm', 'GroupNorm'],\n                         val=1)\n                 ]):\n        super(EfficientNet, self).__init__(init_cfg)\n        assert 
arch in self.arch_settings, \\\n            f'\"{arch}\" is not one of the arch_settings ' \\\n            f'({\", \".join(self.arch_settings.keys())})'\n        self.arch_setting = self.arch_settings[arch]\n        self.layer_setting = self.layer_settings[arch[:1]]\n        for index in out_indices:\n            if index not in range(0, len(self.layer_setting)):\n                raise ValueError('the item in out_indices must in '\n                                 f'range(0, {len(self.layer_setting)}). '\n                                 f'But received {index}')\n\n        if frozen_stages not in range(len(self.layer_setting) + 1):\n            raise ValueError('frozen_stages must be in range(0, '\n                             f'{len(self.layer_setting) + 1}). '\n                             f'But received {frozen_stages}')\n        self.drop_path_rate = drop_path_rate\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n\n        self.layer_setting = model_scaling(self.layer_setting,\n                                           self.arch_setting)\n        block_cfg_0 = self.layer_setting[0][0]\n        block_cfg_last = self.layer_setting[-1][0]\n        self.in_channels = make_divisible(block_cfg_0[1], 8)\n        self.out_channels = block_cfg_last[1]\n        self.layers = nn.ModuleList()\n        self.layers.append(\n            ConvModule(\n                in_channels=3,\n                out_channels=self.in_channels,\n                kernel_size=block_cfg_0[0],\n                stride=block_cfg_0[3],\n                padding=block_cfg_0[0] // 2,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                act_cfg=self.act_cfg))\n        self.make_layer()\n        # Avoid building unused layers in mmdetection.\n        if len(self.layers) < max(self.out_indices) + 1:\n            self.layers.append(\n                ConvModule(\n                    in_channels=self.in_channels,\n                    out_channels=self.out_channels,\n                    kernel_size=block_cfg_last[0],\n                    stride=block_cfg_last[3],\n                    padding=block_cfg_last[0] // 2,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg))\n\n    def make_layer(self):\n        # Without the first and the final conv block.\n        layer_setting = self.layer_setting[1:-1]\n\n        total_num_blocks = sum([len(x) for x in layer_setting])\n        block_idx = 0\n        dpr = [\n            x.item()\n            for x in torch.linspace(0, self.drop_path_rate, total_num_blocks)\n        ]  # stochastic depth decay rule\n\n        for i, layer_cfg in enumerate(layer_setting):\n            # Avoid building unused layers in mmdetection.\n            if i > max(self.out_indices) - 1:\n                break\n            layer = []\n            for i, block_cfg in enumerate(layer_cfg):\n                (kernel_size, out_channels, se_ratio, stride, expand_ratio,\n                 block_type) = block_cfg\n\n                mid_channels = int(self.in_channels * expand_ratio)\n                out_channels = make_divisible(out_channels, 8)\n                if se_ratio <= 0:\n                    se_cfg = None\n                else:\n                    # In mmdetection, the `divisor` is deleted to 
align\n                    # the logic of SELayer with mmcls.\n                    se_cfg = dict(\n                        channels=mid_channels,\n                        ratio=expand_ratio * se_ratio,\n                        act_cfg=(self.act_cfg, dict(type='Sigmoid')))\n                if block_type == 1:  # edge tpu\n                    if i > 0 and expand_ratio == 3:\n                        with_residual = False\n                        expand_ratio = 4\n                    else:\n                        with_residual = True\n                    mid_channels = int(self.in_channels * expand_ratio)\n                    if se_cfg is not None:\n                        # In mmdetection, the `divisor` is deleted to align\n                        # the logic of SELayer with mmcls.\n                        se_cfg = dict(\n                            channels=mid_channels,\n                            ratio=se_ratio * expand_ratio,\n                            act_cfg=(self.act_cfg, dict(type='Sigmoid')))\n                    block = partial(EdgeResidual, with_residual=with_residual)\n                else:\n                    block = InvertedResidual\n                layer.append(\n                    block(\n                        in_channels=self.in_channels,\n                        out_channels=out_channels,\n                        mid_channels=mid_channels,\n                        kernel_size=kernel_size,\n                        stride=stride,\n                        se_cfg=se_cfg,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg,\n                        drop_path_rate=dpr[block_idx],\n                        with_cp=self.with_cp,\n                        # In mmdetection, `with_expand_conv` is set to align\n                        # the logic of InvertedResidual with mmcls.\n                        with_expand_conv=(mid_channels != self.in_channels)))\n                self.in_channels = out_channels\n                block_idx += 1\n            self.layers.append(Sequential(*layer))\n\n    def forward(self, x):\n        outs = []\n        for i, layer in enumerate(self.layers):\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return tuple(outs)\n\n    def _freeze_stages(self):\n        for i in range(self.frozen_stages):\n            m = self.layers[i]\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def train(self, mode=True):\n        super(EfficientNet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eval()\n"
  },
  {
    "path": "mmdet/models/backbones/hourglass.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nfrom .resnet import BasicBlock\n\n\nclass HourglassModule(BaseModule):\n    \"\"\"Hourglass Module for HourglassNet backbone.\n\n    Generate module recursively and use BasicBlock as the base unit.\n\n    Args:\n        depth (int): Depth of current HourglassModule.\n        stage_channels (list[int]): Feature channels of sub-modules in current\n            and follow-up HourglassModule.\n        stage_blocks (list[int]): Number of sub-modules stacked in current and\n            follow-up HourglassModule.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n        upsample_cfg (dict, optional): Config dict for interpolate layer.\n            Default: `dict(mode='nearest')`\n    \"\"\"\n\n    def __init__(self,\n                 depth,\n                 stage_channels,\n                 stage_blocks,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 init_cfg=None,\n                 upsample_cfg=dict(mode='nearest')):\n        super(HourglassModule, self).__init__(init_cfg)\n\n        self.depth = depth\n\n        cur_block = stage_blocks[0]\n        next_block = stage_blocks[1]\n\n        cur_channel = stage_channels[0]\n        next_channel = stage_channels[1]\n\n        self.up1 = ResLayer(\n            BasicBlock, cur_channel, cur_channel, cur_block, norm_cfg=norm_cfg)\n\n        self.low1 = ResLayer(\n            BasicBlock,\n            cur_channel,\n            next_channel,\n            cur_block,\n            stride=2,\n            norm_cfg=norm_cfg)\n\n        if self.depth > 1:\n            self.low2 = HourglassModule(depth - 1, stage_channels[1:],\n                                        stage_blocks[1:])\n        else:\n            self.low2 = ResLayer(\n                BasicBlock,\n                next_channel,\n                next_channel,\n                next_block,\n                norm_cfg=norm_cfg)\n\n        self.low3 = ResLayer(\n            BasicBlock,\n            next_channel,\n            cur_channel,\n            cur_block,\n            norm_cfg=norm_cfg,\n            downsample_first=False)\n\n        self.up2 = F.interpolate\n        self.upsample_cfg = upsample_cfg\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        up1 = self.up1(x)\n        low1 = self.low1(x)\n        low2 = self.low2(low1)\n        low3 = self.low3(low2)\n        # Fixing `scale factor` (e.g. 
2) is common for upsampling, but\n        # in some cases the spatial size is mismatched and error will arise.\n        if 'scale_factor' in self.upsample_cfg:\n            up2 = self.up2(low3, **self.upsample_cfg)\n        else:\n            shape = up1.shape[2:]\n            up2 = self.up2(low3, size=shape, **self.upsample_cfg)\n        return up1 + up2\n\n\n@BACKBONES.register_module()\nclass HourglassNet(BaseModule):\n    \"\"\"HourglassNet backbone.\n\n    Stacked Hourglass Networks for Human Pose Estimation.\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1603.06937>`_ .\n\n    Args:\n        downsample_times (int): Downsample times in a HourglassModule.\n        num_stacks (int): Number of HourglassModule modules stacked,\n            1 for Hourglass-52, 2 for Hourglass-104.\n        stage_channels (list[int]): Feature channel of each sub-module in a\n            HourglassModule.\n        stage_blocks (list[int]): Number of sub-modules stacked in a\n            HourglassModule.\n        feat_channel (int): Feature channel of conv after a HourglassModule.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import HourglassNet\n        >>> import torch\n        >>> self = HourglassNet()\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 511, 511)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_output in level_outputs:\n        ...     print(tuple(level_output.shape))\n        (1, 256, 128, 128)\n        (1, 256, 128, 128)\n    \"\"\"\n\n    def __init__(self,\n                 downsample_times=5,\n                 num_stacks=2,\n                 stage_channels=(256, 256, 384, 384, 384, 512),\n                 stage_blocks=(2, 2, 2, 2, 2, 4),\n                 feat_channel=256,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 pretrained=None,\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(HourglassNet, self).__init__(init_cfg)\n\n        self.num_stacks = num_stacks\n        assert self.num_stacks >= 1\n        assert len(stage_channels) == len(stage_blocks)\n        assert len(stage_channels) > downsample_times\n\n        cur_channel = stage_channels[0]\n\n        self.stem = nn.Sequential(\n            ConvModule(\n                3, cur_channel // 2, 7, padding=3, stride=2,\n                norm_cfg=norm_cfg),\n            ResLayer(\n                BasicBlock,\n                cur_channel // 2,\n                cur_channel,\n                1,\n                stride=2,\n                norm_cfg=norm_cfg))\n\n        self.hourglass_modules = nn.ModuleList([\n            HourglassModule(downsample_times, stage_channels, stage_blocks)\n            for _ in range(num_stacks)\n        ])\n\n        self.inters = ResLayer(\n            BasicBlock,\n            cur_channel,\n            cur_channel,\n            num_stacks - 1,\n            norm_cfg=norm_cfg)\n\n        self.conv1x1s = nn.ModuleList([\n            ConvModule(\n                cur_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)\n            for _ in range(num_stacks - 1)\n        ])\n\n        self.out_convs = 
nn.ModuleList([\n            ConvModule(\n                cur_channel, feat_channel, 3, padding=1, norm_cfg=norm_cfg)\n            for _ in range(num_stacks)\n        ])\n\n        self.remap_convs = nn.ModuleList([\n            ConvModule(\n                feat_channel, cur_channel, 1, norm_cfg=norm_cfg, act_cfg=None)\n            for _ in range(num_stacks - 1)\n        ])\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def init_weights(self):\n        \"\"\"Init module weights.\"\"\"\n        # Training Centripetal Model needs to reset parameters for Conv2d\n        super(HourglassNet, self).init_weights()\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                m.reset_parameters()\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        inter_feat = self.stem(x)\n        out_feats = []\n\n        for ind in range(self.num_stacks):\n            single_hourglass = self.hourglass_modules[ind]\n            out_conv = self.out_convs[ind]\n\n            hourglass_feat = single_hourglass(inter_feat)\n            out_feat = out_conv(hourglass_feat)\n            out_feats.append(out_feat)\n\n            if ind < self.num_stacks - 1:\n                inter_feat = self.conv1x1s[ind](\n                    inter_feat) + self.remap_convs[ind](\n                        out_feat)\n                inter_feat = self.inters[ind](self.relu(inter_feat))\n\n        return out_feats\n"
  },
  {
    "path": "mmdet/models/backbones/hrnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import BaseModule, ModuleList, Sequential\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom .resnet import BasicBlock, Bottleneck\n\n\nclass HRModule(BaseModule):\n    \"\"\"High-Resolution Module for HRNet.\n\n    In this module, every branch has 4 BasicBlocks/Bottlenecks. Fusion/Exchange\n    is in this module.\n    \"\"\"\n\n    def __init__(self,\n                 num_branches,\n                 blocks,\n                 num_blocks,\n                 in_channels,\n                 num_channels,\n                 multiscale_output=True,\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 block_init_cfg=None,\n                 init_cfg=None):\n        super(HRModule, self).__init__(init_cfg)\n        self.block_init_cfg = block_init_cfg\n        self._check_branches(num_branches, num_blocks, in_channels,\n                             num_channels)\n\n        self.in_channels = in_channels\n        self.num_branches = num_branches\n\n        self.multiscale_output = multiscale_output\n        self.norm_cfg = norm_cfg\n        self.conv_cfg = conv_cfg\n        self.with_cp = with_cp\n        self.branches = self._make_branches(num_branches, blocks, num_blocks,\n                                            num_channels)\n        self.fuse_layers = self._make_fuse_layers()\n        self.relu = nn.ReLU(inplace=False)\n\n    def _check_branches(self, num_branches, num_blocks, in_channels,\n                        num_channels):\n        if num_branches != len(num_blocks):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_BLOCKS({len(num_blocks)})'\n            raise ValueError(error_msg)\n\n        if num_branches != len(num_channels):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_CHANNELS({len(num_channels)})'\n            raise ValueError(error_msg)\n\n        if num_branches != len(in_channels):\n            error_msg = f'NUM_BRANCHES({num_branches}) ' \\\n                        f'!= NUM_INCHANNELS({len(in_channels)})'\n            raise ValueError(error_msg)\n\n    def _make_one_branch(self,\n                         branch_index,\n                         block,\n                         num_blocks,\n                         num_channels,\n                         stride=1):\n        downsample = None\n        if stride != 1 or \\\n                self.in_channels[branch_index] != \\\n                num_channels[branch_index] * block.expansion:\n            downsample = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    self.in_channels[branch_index],\n                    num_channels[branch_index] * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, num_channels[branch_index] *\n                                 block.expansion)[1])\n\n        layers = []\n        layers.append(\n            block(\n                self.in_channels[branch_index],\n                num_channels[branch_index],\n                stride,\n                downsample=downsample,\n                with_cp=self.with_cp,\n                
norm_cfg=self.norm_cfg,\n                conv_cfg=self.conv_cfg,\n                init_cfg=self.block_init_cfg))\n        self.in_channels[branch_index] = \\\n            num_channels[branch_index] * block.expansion\n        for i in range(1, num_blocks[branch_index]):\n            layers.append(\n                block(\n                    self.in_channels[branch_index],\n                    num_channels[branch_index],\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n                    init_cfg=self.block_init_cfg))\n\n        return Sequential(*layers)\n\n    def _make_branches(self, num_branches, block, num_blocks, num_channels):\n        branches = []\n\n        for i in range(num_branches):\n            branches.append(\n                self._make_one_branch(i, block, num_blocks, num_channels))\n\n        return ModuleList(branches)\n\n    def _make_fuse_layers(self):\n        if self.num_branches == 1:\n            return None\n\n        num_branches = self.num_branches\n        in_channels = self.in_channels\n        fuse_layers = []\n        num_out_branches = num_branches if self.multiscale_output else 1\n        for i in range(num_out_branches):\n            fuse_layer = []\n            for j in range(num_branches):\n                if j > i:\n                    fuse_layer.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                in_channels[j],\n                                in_channels[i],\n                                kernel_size=1,\n                                stride=1,\n                                padding=0,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg, in_channels[i])[1],\n                            nn.Upsample(\n                                scale_factor=2**(j - i), mode='nearest')))\n                elif j == i:\n                    fuse_layer.append(None)\n                else:\n                    conv_downsamples = []\n                    for k in range(i - j):\n                        if k == i - j - 1:\n                            conv_downsamples.append(\n                                nn.Sequential(\n                                    build_conv_layer(\n                                        self.conv_cfg,\n                                        in_channels[j],\n                                        in_channels[i],\n                                        kernel_size=3,\n                                        stride=2,\n                                        padding=1,\n                                        bias=False),\n                                    build_norm_layer(self.norm_cfg,\n                                                     in_channels[i])[1]))\n                        else:\n                            conv_downsamples.append(\n                                nn.Sequential(\n                                    build_conv_layer(\n                                        self.conv_cfg,\n                                        in_channels[j],\n                                        in_channels[j],\n                                        kernel_size=3,\n                                        stride=2,\n                                        padding=1,\n                                        bias=False),\n                                    
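# the final downsample step has no\n                                    # ReLU; it is applied after fusion\n                                    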
build_norm_layer(self.norm_cfg,\n                                                     in_channels[j])[1],\n                                    nn.ReLU(inplace=False)))\n                    fuse_layer.append(nn.Sequential(*conv_downsamples))\n            fuse_layers.append(nn.ModuleList(fuse_layer))\n\n        return nn.ModuleList(fuse_layers)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        if self.num_branches == 1:\n            return [self.branches[0](x[0])]\n\n        for i in range(self.num_branches):\n            x[i] = self.branches[i](x[i])\n\n        x_fuse = []\n        for i in range(len(self.fuse_layers)):\n            y = 0\n            for j in range(self.num_branches):\n                if i == j:\n                    y += x[j]\n                else:\n                    y += self.fuse_layers[i][j](x[j])\n            x_fuse.append(self.relu(y))\n        return x_fuse\n\n\n@BACKBONES.register_module()\nclass HRNet(BaseModule):\n    \"\"\"HRNet backbone.\n\n    `High-Resolution Representations for Labeling Pixels and Regions\n    arXiv: <https://arxiv.org/abs/1904.04514>`_.\n\n    Args:\n        extra (dict): Detailed configuration for each stage of HRNet.\n            There must be 4 stages, the configuration for each stage must have\n            5 keys:\n\n                - num_modules(int): The number of HRModule in this stage.\n                - num_branches(int): The number of branches in the HRModule.\n                - block(str): The type of convolution block.\n                - num_blocks(tuple): The number of blocks in each branch.\n                    The length must be equal to num_branches.\n                - num_channels(tuple): The number of channels in each branch.\n                    The length must be equal to num_branches.\n        in_channels (int): Number of input image channels. Default: 3.\n        conv_cfg (dict): Dictionary to construct and config conv layer.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Default: True.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity. Default: False.\n        multiscale_output (bool): Whether to output multi-level features\n            produced by multiple branches. If False, only the first level\n            feature will be output. Default: True.\n        pretrained (str, optional): Model pretrained path. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n\n    Example:\n        >>> from mmdet.models import HRNet\n        >>> import torch\n        >>> extra = dict(\n        >>>     stage1=dict(\n        >>>         num_modules=1,\n        >>>         num_branches=1,\n        >>>         block='BOTTLENECK',\n        >>>         num_blocks=(4, ),\n        >>>         num_channels=(64, )),\n        >>>     stage2=dict(\n        >>>         num_modules=1,\n        >>>         num_branches=2,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4),\n        >>>         num_channels=(32, 64)),\n        >>>     stage3=dict(\n        >>>         num_modules=4,\n        >>>         num_branches=3,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4, 4),\n        >>>         num_channels=(32, 64, 128)),\n        >>>     stage4=dict(\n        >>>         num_modules=3,\n        >>>         num_branches=4,\n        >>>         block='BASIC',\n        >>>         num_blocks=(4, 4, 4, 4),\n        >>>         num_channels=(32, 64, 128, 256)))\n        >>> self = HRNet(extra, in_channels=1)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 1, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 32, 8, 8)\n        (1, 64, 4, 4)\n        (1, 128, 2, 2)\n        (1, 256, 1, 1)\n    \"\"\"\n\n    blocks_dict = {'BASIC': BasicBlock, 'BOTTLENECK': Bottleneck}\n\n    def __init__(self,\n                 extra,\n                 in_channels=3,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 norm_eval=True,\n                 with_cp=False,\n                 zero_init_residual=False,\n                 multiscale_output=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(HRNet, self).__init__(init_cfg)\n\n        self.pretrained = pretrained\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        # Assert configurations of 4 stages are in extra\n        assert 'stage1' in extra and 'stage2' in extra \\\n               and 'stage3' in extra and 'stage4' in extra\n        # Assert whether the length of `num_blocks` and `num_channels` are\n        # equal to `num_branches`\n        for i in range(4):\n            cfg = extra[f'stage{i + 1}']\n            assert len(cfg['num_blocks']) == cfg['num_branches'] and \\\n                   len(cfg['num_channels']) == cfg['num_branches']\n\n        self.extra = extra\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n        
self.zero_init_residual = zero_init_residual\n\n        # stem net\n        self.norm1_name, norm1 = build_norm_layer(self.norm_cfg, 64, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(self.norm_cfg, 64, postfix=2)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            in_channels,\n            64,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n\n        self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            self.conv_cfg,\n            64,\n            64,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.relu = nn.ReLU(inplace=True)\n\n        # stage 1\n        self.stage1_cfg = self.extra['stage1']\n        num_channels = self.stage1_cfg['num_channels'][0]\n        block_type = self.stage1_cfg['block']\n        num_blocks = self.stage1_cfg['num_blocks'][0]\n\n        block = self.blocks_dict[block_type]\n        stage1_out_channels = num_channels * block.expansion\n        self.layer1 = self._make_layer(block, 64, num_channels, num_blocks)\n\n        # stage 2\n        self.stage2_cfg = self.extra['stage2']\n        num_channels = self.stage2_cfg['num_channels']\n        block_type = self.stage2_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition1 = self._make_transition_layer([stage1_out_channels],\n                                                       num_channels)\n        self.stage2, pre_stage_channels = self._make_stage(\n            self.stage2_cfg, num_channels)\n\n        # stage 3\n        self.stage3_cfg = self.extra['stage3']\n        num_channels = self.stage3_cfg['num_channels']\n        block_type = self.stage3_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition2 = self._make_transition_layer(pre_stage_channels,\n                                                       num_channels)\n        self.stage3, pre_stage_channels = self._make_stage(\n            self.stage3_cfg, num_channels)\n\n        # stage 4\n        self.stage4_cfg = self.extra['stage4']\n        num_channels = self.stage4_cfg['num_channels']\n        block_type = self.stage4_cfg['block']\n\n        block = self.blocks_dict[block_type]\n        num_channels = [channel * block.expansion for channel in num_channels]\n        self.transition3 = self._make_transition_layer(pre_stage_channels,\n                                                       num_channels)\n        self.stage4, pre_stage_channels = self._make_stage(\n            self.stage4_cfg, num_channels, multiscale_output=multiscale_output)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: the normalization layer named \"norm2\" \"\"\"\n        return getattr(self, self.norm2_name)\n\n    def _make_transition_layer(self, num_channels_pre_layer,\n                               num_channels_cur_layer):\n        num_branches_cur = len(num_channels_cur_layer)\n        num_branches_pre = len(num_channels_pre_layer)\n\n        transition_layers = []\n        for i in range(num_branches_cur):\n            if i < 
num_branches_pre:\n                if num_channels_cur_layer[i] != num_channels_pre_layer[i]:\n                    transition_layers.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                num_channels_pre_layer[i],\n                                num_channels_cur_layer[i],\n                                kernel_size=3,\n                                stride=1,\n                                padding=1,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg,\n                                             num_channels_cur_layer[i])[1],\n                            nn.ReLU(inplace=True)))\n                else:\n                    transition_layers.append(None)\n            else:\n                conv_downsamples = []\n                for j in range(i + 1 - num_branches_pre):\n                    in_channels = num_channels_pre_layer[-1]\n                    out_channels = num_channels_cur_layer[i] \\\n                        if j == i - num_branches_pre else in_channels\n                    conv_downsamples.append(\n                        nn.Sequential(\n                            build_conv_layer(\n                                self.conv_cfg,\n                                in_channels,\n                                out_channels,\n                                kernel_size=3,\n                                stride=2,\n                                padding=1,\n                                bias=False),\n                            build_norm_layer(self.norm_cfg, out_channels)[1],\n                            nn.ReLU(inplace=True)))\n                transition_layers.append(nn.Sequential(*conv_downsamples))\n\n        return nn.ModuleList(transition_layers)\n\n    def _make_layer(self, block, inplanes, planes, blocks, stride=1):\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=stride,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, planes * block.expansion)[1])\n\n        layers = []\n        block_init_cfg = None\n        if self.pretrained is None and not hasattr(\n                self, 'init_cfg') and self.zero_init_residual:\n            if block is BasicBlock:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm2'))\n            elif block is Bottleneck:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm3'))\n        layers.append(\n            block(\n                inplanes,\n                planes,\n                stride,\n                downsample=downsample,\n                with_cp=self.with_cp,\n                norm_cfg=self.norm_cfg,\n                conv_cfg=self.conv_cfg,\n                init_cfg=block_init_cfg,\n            ))\n        inplanes = planes * block.expansion\n        for i in range(1, blocks):\n            layers.append(\n                block(\n                    inplanes,\n                    planes,\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n 
                   init_cfg=block_init_cfg))\n\n        return Sequential(*layers)\n\n    def _make_stage(self, layer_config, in_channels, multiscale_output=True):\n        num_modules = layer_config['num_modules']\n        num_branches = layer_config['num_branches']\n        num_blocks = layer_config['num_blocks']\n        num_channels = layer_config['num_channels']\n        block = self.blocks_dict[layer_config['block']]\n\n        hr_modules = []\n        block_init_cfg = None\n        if self.pretrained is None and not hasattr(\n                self, 'init_cfg') and self.zero_init_residual:\n            if block is BasicBlock:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm2'))\n            elif block is Bottleneck:\n                block_init_cfg = dict(\n                    type='Constant', val=0, override=dict(name='norm3'))\n\n        for i in range(num_modules):\n            # multiscale_output is only used for the last module\n            if not multiscale_output and i == num_modules - 1:\n                reset_multiscale_output = False\n            else:\n                reset_multiscale_output = True\n\n            hr_modules.append(\n                HRModule(\n                    num_branches,\n                    block,\n                    num_blocks,\n                    in_channels,\n                    num_channels,\n                    reset_multiscale_output,\n                    with_cp=self.with_cp,\n                    norm_cfg=self.norm_cfg,\n                    conv_cfg=self.conv_cfg,\n                    block_init_cfg=block_init_cfg))\n\n        return Sequential(*hr_modules), in_channels\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        x = self.norm1(x)\n        x = self.relu(x)\n        x = self.conv2(x)\n        x = self.norm2(x)\n        x = self.relu(x)\n        x = self.layer1(x)\n\n        x_list = []\n        for i in range(self.stage2_cfg['num_branches']):\n            if self.transition1[i] is not None:\n                x_list.append(self.transition1[i](x))\n            else:\n                x_list.append(x)\n        y_list = self.stage2(x_list)\n\n        x_list = []\n        for i in range(self.stage3_cfg['num_branches']):\n            if self.transition2[i] is not None:\n                x_list.append(self.transition2[i](y_list[-1]))\n            else:\n                x_list.append(y_list[i])\n        y_list = self.stage3(x_list)\n\n        x_list = []\n        for i in range(self.stage4_cfg['num_branches']):\n            if self.transition3[i] is not None:\n                x_list.append(self.transition3[i](y_list[-1]))\n            else:\n                x_list.append(y_list[i])\n        y_list = self.stage4(x_list)\n\n        return y_list\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keeping the normalization\n        layers frozen.\"\"\"\n        super(HRNet, self).train(mode)\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval has effect on BatchNorm only\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n"
  },
  {
    "path": "mmdet/models/backbones/mobilenet_v2.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import InvertedResidual, make_divisible\n\n\n@BACKBONES.register_module()\nclass MobileNetV2(BaseModule):\n    \"\"\"MobileNetV2 backbone.\n\n    Args:\n        widen_factor (float): Width multiplier, multiply number of\n            channels in each layer by this amount. Default: 1.0.\n        out_indices (Sequence[int], optional): Output from which stages.\n            Default: (1, 2, 4, 7).\n        frozen_stages (int): Stages to be frozen (all param fixed).\n            Default: -1, which means not freezing any parameters.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU6').\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only. Default: False.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    # Parameters to build layers. 4 parameters are needed to construct a\n    # layer, from left to right: expand_ratio, channel, num_blocks, stride.\n    arch_settings = [[1, 16, 1, 1], [6, 24, 2, 2], [6, 32, 3, 2],\n                     [6, 64, 4, 2], [6, 96, 3, 1], [6, 160, 3, 2],\n                     [6, 320, 1, 1]]\n\n    def __init__(self,\n                 widen_factor=1.,\n                 out_indices=(1, 2, 4, 7),\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU6'),\n                 norm_eval=False,\n                 with_cp=False,\n                 pretrained=None,\n                 init_cfg=None):\n        super(MobileNetV2, self).__init__(init_cfg)\n\n        self.pretrained = pretrained\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.widen_factor = widen_factor\n        self.out_indices = out_indices\n        if not set(out_indices).issubset(set(range(0, 8))):\n            raise ValueError('out_indices must be a subset of range'\n                             
f'(0, 8). But received {out_indices}')\n\n        if frozen_stages not in range(-1, 8):\n            raise ValueError('frozen_stages must be in range(-1, 8). '\n                             f'But received {frozen_stages}')\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.norm_eval = norm_eval\n        self.with_cp = with_cp\n\n        self.in_channels = make_divisible(32 * widen_factor, 8)\n\n        self.conv1 = ConvModule(\n            in_channels=3,\n            out_channels=self.in_channels,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            act_cfg=self.act_cfg)\n\n        self.layers = []\n\n        for i, layer_cfg in enumerate(self.arch_settings):\n            expand_ratio, channel, num_blocks, stride = layer_cfg\n            out_channels = make_divisible(channel * widen_factor, 8)\n            inverted_res_layer = self.make_layer(\n                out_channels=out_channels,\n                num_blocks=num_blocks,\n                stride=stride,\n                expand_ratio=expand_ratio)\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, inverted_res_layer)\n            self.layers.append(layer_name)\n\n        if widen_factor > 1.0:\n            self.out_channel = int(1280 * widen_factor)\n        else:\n            self.out_channel = 1280\n\n        layer = ConvModule(\n            in_channels=self.in_channels,\n            out_channels=self.out_channel,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            act_cfg=self.act_cfg)\n        self.add_module('conv2', layer)\n        self.layers.append('conv2')\n\n    def make_layer(self, out_channels, num_blocks, stride, expand_ratio):\n        \"\"\"Stack InvertedResidual blocks to build a layer for MobileNetV2.\n\n        Args:\n            out_channels (int): out_channels of block.\n            num_blocks (int): number of blocks.\n            stride (int): stride of the first block. Default: 1\n            expand_ratio (int): Expand the number of channels of the\n                hidden layer in InvertedResidual by this ratio. 
Default: 6.\n        \"\"\"\n        layers = []\n        for i in range(num_blocks):\n            if i >= 1:\n                stride = 1\n            layers.append(\n                InvertedResidual(\n                    self.in_channels,\n                    out_channels,\n                    mid_channels=int(round(self.in_channels * expand_ratio)),\n                    stride=stride,\n                    with_expand_conv=expand_ratio != 1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg,\n                    with_cp=self.with_cp))\n            self.in_channels = out_channels\n\n        return nn.Sequential(*layers)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            for param in self.conv1.parameters():\n                param.requires_grad = False\n        for i in range(1, self.frozen_stages + 1):\n            layer = getattr(self, f'layer{i}')\n            layer.eval()\n            for param in layer.parameters():\n                param.requires_grad = False\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        outs = []\n        for i, layer_name in enumerate(self.layers):\n            layer = getattr(self, layer_name)\n            x = layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keeping normalization\n        layers frozen.\"\"\"\n        super(MobileNetV2, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval has effect on BatchNorm only\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n"
  },
  {
    "path": "mmdet/models/backbones/pvt.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer,\n                      constant_init, normal_init, trunc_normal_init)\nfrom mmcv.cnn.bricks.drop import build_dropout\nfrom mmcv.cnn.bricks.transformer import MultiheadAttention\nfrom mmcv.cnn.utils.weight_init import trunc_normal_\nfrom mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint,\n                         load_state_dict)\nfrom torch.nn.modules.utils import _pair as to_2tuple\n\nfrom ...utils import get_root_logger\nfrom ..builder import BACKBONES\nfrom ..utils import PatchEmbed, nchw_to_nlc, nlc_to_nchw, pvt_convert\n\n\nclass MixFFN(BaseModule):\n    \"\"\"An implementation of MixFFN of PVT.\n\n    The differences between MixFFN & FFN:\n        1. Use 1X1 Conv to replace Linear layer.\n        2. Introduce 3X3 Depth-wise Conv to encode positional information.\n\n    Args:\n        embed_dims (int): The feature dimension. Same as\n            `MultiheadAttention`.\n        feedforward_channels (int): The hidden dimension of FFNs.\n        act_cfg (dict, optional): The activation config for FFNs.\n            Default: dict(type='GELU').\n        ffn_drop (float, optional): Probability of an element to be\n            zeroed in FFN. Default 0.0.\n        dropout_layer (obj:`ConfigDict`): The dropout_layer used\n            when adding the shortcut.\n            Default: None.\n        use_conv (bool): If True, add 3x3 DWConv between two Linear layers.\n            Defaults: False.\n        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 feedforward_channels,\n                 act_cfg=dict(type='GELU'),\n                 ffn_drop=0.,\n                 dropout_layer=None,\n                 use_conv=False,\n                 init_cfg=None):\n        super(MixFFN, self).__init__(init_cfg=init_cfg)\n\n        self.embed_dims = embed_dims\n        self.feedforward_channels = feedforward_channels\n        self.act_cfg = act_cfg\n        activate = build_activation_layer(act_cfg)\n\n        in_channels = embed_dims\n        fc1 = Conv2d(\n            in_channels=in_channels,\n            out_channels=feedforward_channels,\n            kernel_size=1,\n            stride=1,\n            bias=True)\n        if use_conv:\n            # 3x3 depth wise conv to provide positional encode information\n            dw_conv = Conv2d(\n                in_channels=feedforward_channels,\n                out_channels=feedforward_channels,\n                kernel_size=3,\n                stride=1,\n                padding=(3 - 1) // 2,\n                bias=True,\n                groups=feedforward_channels)\n        fc2 = Conv2d(\n            in_channels=feedforward_channels,\n            out_channels=in_channels,\n            kernel_size=1,\n            stride=1,\n            bias=True)\n        drop = nn.Dropout(ffn_drop)\n        layers = [fc1, activate, drop, fc2, drop]\n        if use_conv:\n            layers.insert(1, dw_conv)\n        self.layers = Sequential(*layers)\n        self.dropout_layer = build_dropout(\n            dropout_layer) if dropout_layer else torch.nn.Identity()\n\n    def forward(self, x, hw_shape, identity=None):\n        out = nlc_to_nchw(x, hw_shape)\n        out = 
self.layers(out)\n        out = nchw_to_nlc(out)\n        if identity is None:\n            identity = x\n        return identity + self.dropout_layer(out)\n\n\nclass SpatialReductionAttention(MultiheadAttention):\n    \"\"\"An implementation of Spatial Reduction Attention of PVT.\n\n    This module is modified from MultiheadAttention which is a module from\n    mmcv.cnn.bricks.transformer.\n\n    Args:\n        embed_dims (int): The embedding dimension.\n        num_heads (int): Parallel attention heads.\n        attn_drop (float): A Dropout layer on attn_output_weights.\n            Default: 0.0.\n        proj_drop (float): A Dropout layer after `nn.MultiheadAttention`.\n            Default: 0.0.\n        dropout_layer (obj:`ConfigDict`): The dropout_layer used\n            when adding the shortcut. Default: None.\n        batch_first (bool): Key, Query and Value are shape of\n            (batch, n, embed_dim)\n            or (n, batch, embed_dim). Default: True.\n        qkv_bias (bool): enable bias for qkv if True. Default: True.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        sr_ratio (int): The ratio of spatial reduction of Spatial Reduction\n            Attention of PVT. Default: 1.\n        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 attn_drop=0.,\n                 proj_drop=0.,\n                 dropout_layer=None,\n                 batch_first=True,\n                 qkv_bias=True,\n                 norm_cfg=dict(type='LN'),\n                 sr_ratio=1,\n                 init_cfg=None):\n        super().__init__(\n            embed_dims,\n            num_heads,\n            attn_drop,\n            proj_drop,\n            batch_first=batch_first,\n            dropout_layer=dropout_layer,\n            bias=qkv_bias,\n            init_cfg=init_cfg)\n\n        self.sr_ratio = sr_ratio\n        if sr_ratio > 1:\n            self.sr = Conv2d(\n                in_channels=embed_dims,\n                out_channels=embed_dims,\n                kernel_size=sr_ratio,\n                stride=sr_ratio)\n            # The ret[0] of build_norm_layer is norm name.\n            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        # handle the BC-breaking from https://github.com/open-mmlab/mmcv/pull/1418 # noqa\n        from mmdet import digit_version, mmcv_version\n        if mmcv_version < digit_version('1.3.17'):\n            warnings.warn('The legacy version of forward function in '\n                          'SpatialReductionAttention is deprecated in '\n                          'mmcv>=1.3.17 and will no longer be supported in '\n                          'the future. 
Please upgrade your mmcv.')\n            self.forward = self.legacy_forward\n\n    def forward(self, x, hw_shape, identity=None):\n\n        x_q = x\n        if self.sr_ratio > 1:\n            x_kv = nlc_to_nchw(x, hw_shape)\n            x_kv = self.sr(x_kv)\n            x_kv = nchw_to_nlc(x_kv)\n            x_kv = self.norm(x_kv)\n        else:\n            x_kv = x\n\n        if identity is None:\n            identity = x_q\n\n        # Because the dataflow('key', 'query', 'value') of\n        # ``torch.nn.MultiheadAttention`` is (num_query, batch,\n        # embed_dims), We should adjust the shape of dataflow from\n        # batch_first (batch, num_query, embed_dims) to num_query_first\n        # (num_query ,batch, embed_dims), and recover ``attn_output``\n        # from num_query_first to batch_first.\n        if self.batch_first:\n            x_q = x_q.transpose(0, 1)\n            x_kv = x_kv.transpose(0, 1)\n\n        out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]\n\n        if self.batch_first:\n            out = out.transpose(0, 1)\n\n        return identity + self.dropout_layer(self.proj_drop(out))\n\n    def legacy_forward(self, x, hw_shape, identity=None):\n        \"\"\"multi head attention forward in mmcv version < 1.3.17.\"\"\"\n        x_q = x\n        if self.sr_ratio > 1:\n            x_kv = nlc_to_nchw(x, hw_shape)\n            x_kv = self.sr(x_kv)\n            x_kv = nchw_to_nlc(x_kv)\n            x_kv = self.norm(x_kv)\n        else:\n            x_kv = x\n\n        if identity is None:\n            identity = x_q\n\n        out = self.attn(query=x_q, key=x_kv, value=x_kv)[0]\n\n        return identity + self.dropout_layer(self.proj_drop(out))\n\n\nclass PVTEncoderLayer(BaseModule):\n    \"\"\"Implements one encoder layer in PVT.\n\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        drop_rate (float): Probability of an element to be zeroed.\n            after the feed forward layer. Default: 0.0.\n        attn_drop_rate (float): The drop out rate for attention layer.\n            Default: 0.0.\n        drop_path_rate (float): stochastic depth rate. Default: 0.0.\n        qkv_bias (bool): enable bias for qkv if True.\n            Default: True.\n        act_cfg (dict): The activation config for FFNs.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        sr_ratio (int): The ratio of spatial reduction of Spatial Reduction\n            Attention of PVT. 
Default: 1.\n        use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.\n            Default: False.\n        init_cfg (dict, optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 qkv_bias=True,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 sr_ratio=1,\n                 use_conv_ffn=False,\n                 init_cfg=None):\n        super(PVTEncoderLayer, self).__init__(init_cfg=init_cfg)\n\n        # The ret[0] of build_norm_layer is norm name.\n        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        self.attn = SpatialReductionAttention(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            attn_drop=attn_drop_rate,\n            proj_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            qkv_bias=qkv_bias,\n            norm_cfg=norm_cfg,\n            sr_ratio=sr_ratio)\n\n        # The ret[0] of build_norm_layer is norm name.\n        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]\n\n        self.ffn = MixFFN(\n            embed_dims=embed_dims,\n            feedforward_channels=feedforward_channels,\n            ffn_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            use_conv=use_conv_ffn,\n            act_cfg=act_cfg)\n\n    def forward(self, x, hw_shape):\n        x = self.attn(self.norm1(x), hw_shape, identity=x)\n        x = self.ffn(self.norm2(x), hw_shape, identity=x)\n\n        return x\n\n\nclass AbsolutePositionEmbedding(BaseModule):\n    \"\"\"An implementation of the absolute position embedding in PVT.\n\n    Args:\n        pos_shape (int): The shape of the absolute position embedding.\n        pos_dim (int): The dimension of the absolute position embedding.\n        drop_rate (float): Probability of an element to be zeroed.\n            Default: 0.0.\n    \"\"\"\n\n    def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        if isinstance(pos_shape, int):\n            pos_shape = to_2tuple(pos_shape)\n        elif isinstance(pos_shape, tuple):\n            if len(pos_shape) == 1:\n                pos_shape = to_2tuple(pos_shape[0])\n            assert len(pos_shape) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pos_shape)}'\n        self.pos_shape = pos_shape\n        self.pos_dim = pos_dim\n\n        self.pos_embed = nn.Parameter(\n            torch.zeros(1, pos_shape[0] * pos_shape[1], pos_dim))\n        self.drop = nn.Dropout(p=drop_rate)\n\n    def init_weights(self):\n        trunc_normal_(self.pos_embed, std=0.02)\n\n    def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):\n        \"\"\"Resize pos_embed weights.\n\n        Resize pos_embed using bilinear interpolate method.\n\n        Args:\n            pos_embed (torch.Tensor): Position embedding weights.\n            input_shape (tuple): Tuple for (downsampled input image height,\n                downsampled input image width).\n            mode (str): Algorithm used for upsampling:\n                ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n         
       ``'trilinear'``. Default: ``'bilinear'``.\n\n        Return:\n            torch.Tensor: The resized pos_embed of shape [B, L_new, C].\n        \"\"\"\n        assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'\n        pos_h, pos_w = self.pos_shape\n        pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]\n        pos_embed_weight = pos_embed_weight.reshape(\n            1, pos_h, pos_w, self.pos_dim).permute(0, 3, 1, 2).contiguous()\n        pos_embed_weight = F.interpolate(\n            pos_embed_weight, size=input_shape, mode=mode)\n        pos_embed_weight = torch.flatten(pos_embed_weight,\n                                         2).transpose(1, 2).contiguous()\n        pos_embed = pos_embed_weight\n\n        return pos_embed\n\n    def forward(self, x, hw_shape, mode='bilinear'):\n        pos_embed = self.resize_pos_embed(self.pos_embed, hw_shape, mode)\n        return self.drop(x + pos_embed)\n\n\n@BACKBONES.register_module()\nclass PyramidVisionTransformer(BaseModule):\n    \"\"\"Pyramid Vision Transformer (PVT)\n\n    Implementation of `Pyramid Vision Transformer: A Versatile Backbone for\n    Dense Prediction without Convolutions\n    <https://arxiv.org/pdf/2102.12122.pdf>`_.\n\n    Args:\n        pretrain_img_size (int | tuple[int]): The size of input image when\n            pretraining. Default: 224.\n        in_channels (int): Number of input channels. Default: 3.\n        embed_dims (int): Embedding dimension. Default: 64.\n        num_stages (int): The number of stages. Default: 4.\n        num_layers (Sequence[int]): The number of encoder layers in each\n            transformer stage. Default: [3, 4, 6, 3].\n        num_heads (Sequence[int]): The number of attention heads in each\n            transformer stage. Default: [1, 2, 5, 8].\n        patch_sizes (Sequence[int]): The patch_size of each patch embedding.\n            Default: [4, 2, 2, 2].\n        strides (Sequence[int]): The stride of each patch embedding.\n            Default: [4, 2, 2, 2].\n        paddings (Sequence[int]): The padding of each patch embedding.\n            Default: [0, 0, 0, 0].\n        sr_ratios (Sequence[int]): The spatial reduction ratio of each\n            transformer stage. Default: [8, 4, 2, 1].\n        out_indices (Sequence[int] | int): Output from which stages.\n            Default: (0, 1, 2, 3).\n        mlp_ratios (Sequence[int]): The ratio of the mlp hidden dim to the\n            embedding dim in each transformer stage.\n            Default: [8, 8, 4, 4].\n        qkv_bias (bool): Enable bias for qkv if True. Default: True.\n        drop_rate (float): Probability of an element to be zeroed.\n            Default 0.0.\n        attn_drop_rate (float): The drop out rate for attention layer.\n            Default 0.0.\n        drop_path_rate (float): stochastic depth rate. Default 0.1.\n        use_abs_pos_embed (bool): If True, add absolute position embedding to\n            the patch embedding. Defaults: True.\n        use_conv_ffn (bool): If True, use Convolutional FFN to replace FFN.\n            Default: False.\n        act_cfg (dict): The activation config for FFNs.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='LN').\n        pretrained (str, optional): model pretrained path. Default: None.\n        convert_weights (bool): The flag indicates whether the\n            pre-trained model is from the original repo. 
We may need\n            to convert some keys to make it compatible.\n            Default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=224,\n                 in_channels=3,\n                 embed_dims=64,\n                 num_stages=4,\n                 num_layers=[3, 4, 6, 3],\n                 num_heads=[1, 2, 5, 8],\n                 patch_sizes=[4, 2, 2, 2],\n                 strides=[4, 2, 2, 2],\n                 paddings=[0, 0, 0, 0],\n                 sr_ratios=[8, 4, 2, 1],\n                 out_indices=(0, 1, 2, 3),\n                 mlp_ratios=[8, 8, 4, 4],\n                 qkv_bias=True,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.1,\n                 use_abs_pos_embed=True,\n                 norm_after_stage=False,\n                 use_conv_ffn=False,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN', eps=1e-6),\n                 pretrained=None,\n                 convert_weights=True,\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        self.convert_weights = convert_weights\n        if isinstance(pretrain_img_size, int):\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n        elif isinstance(pretrain_img_size, tuple):\n            if len(pretrain_img_size) == 1:\n                pretrain_img_size = to_2tuple(pretrain_img_size[0])\n            assert len(pretrain_img_size) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pretrain_img_size)}'\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = init_cfg\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.embed_dims = embed_dims\n\n        self.num_stages = num_stages\n        self.num_layers = num_layers\n        self.num_heads = num_heads\n        self.patch_sizes = patch_sizes\n        self.strides = strides\n        self.sr_ratios = sr_ratios\n        assert num_stages == len(num_layers) == len(num_heads) \\\n               == len(patch_sizes) == len(strides) == len(sr_ratios)\n\n        self.out_indices = out_indices\n        assert max(out_indices) < self.num_stages\n        self.pretrained = pretrained\n\n        # transformer encoder\n        dpr = [\n            x.item()\n            for x in torch.linspace(0, drop_path_rate, sum(num_layers))\n        ]  # stochastic num_layer decay rule\n\n        cur = 0\n        self.layers = ModuleList()\n        for i, num_layer in enumerate(num_layers):\n            embed_dims_i = embed_dims * num_heads[i]\n            patch_embed = PatchEmbed(\n                in_channels=in_channels,\n                embed_dims=embed_dims_i,\n                kernel_size=patch_sizes[i],\n                stride=strides[i],\n                padding=paddings[i],\n                bias=True,\n                norm_cfg=norm_cfg)\n\n            layers = ModuleList()\n            if use_abs_pos_embed:\n                pos_shape 
= pretrain_img_size // np.prod(patch_sizes[:i + 1])\n                pos_embed = AbsolutePositionEmbedding(\n                    pos_shape=pos_shape,\n                    pos_dim=embed_dims_i,\n                    drop_rate=drop_rate)\n                layers.append(pos_embed)\n            layers.extend([\n                PVTEncoderLayer(\n                    embed_dims=embed_dims_i,\n                    num_heads=num_heads[i],\n                    feedforward_channels=mlp_ratios[i] * embed_dims_i,\n                    drop_rate=drop_rate,\n                    attn_drop_rate=attn_drop_rate,\n                    drop_path_rate=dpr[cur + idx],\n                    qkv_bias=qkv_bias,\n                    act_cfg=act_cfg,\n                    norm_cfg=norm_cfg,\n                    sr_ratio=sr_ratios[i],\n                    use_conv_ffn=use_conv_ffn) for idx in range(num_layer)\n            ])\n            in_channels = embed_dims_i\n            # The ret[0] of build_norm_layer is norm name.\n            if norm_after_stage:\n                norm = build_norm_layer(norm_cfg, embed_dims_i)[1]\n            else:\n                norm = nn.Identity()\n            self.layers.append(ModuleList([patch_embed, layers, norm]))\n            cur += num_layer\n\n    def init_weights(self):\n        logger = get_root_logger()\n        if self.init_cfg is None:\n            logger.warn(f'No pre-trained weights for '\n                        f'{self.__class__.__name__}, '\n                        f'training start from scratch')\n            for m in self.modules():\n                if isinstance(m, nn.Linear):\n                    trunc_normal_init(m, std=.02, bias=0.)\n                elif isinstance(m, nn.LayerNorm):\n                    constant_init(m, 1.0)\n                elif isinstance(m, nn.Conv2d):\n                    fan_out = m.kernel_size[0] * m.kernel_size[\n                        1] * m.out_channels\n                    fan_out //= m.groups\n                    normal_init(m, 0, math.sqrt(2.0 / fan_out))\n                elif isinstance(m, AbsolutePositionEmbedding):\n                    m.init_weights()\n        else:\n            assert 'checkpoint' in self.init_cfg, f'Only support ' \\\n                                                  f'specify `Pretrained` in ' \\\n                                                  f'`init_cfg` in ' \\\n                                                  f'{self.__class__.__name__} '\n            checkpoint = _load_checkpoint(\n                self.init_cfg.checkpoint, logger=logger, map_location='cpu')\n            logger.warn(f'Load pre-trained model for '\n                        f'{self.__class__.__name__} from original repo')\n            if 'state_dict' in checkpoint:\n                state_dict = checkpoint['state_dict']\n            elif 'model' in checkpoint:\n                state_dict = checkpoint['model']\n            else:\n                state_dict = checkpoint\n            if self.convert_weights:\n                # Because pvt backbones are not supported by mmcls,\n                # so we need to convert pre-trained weights to match this\n                # implementation.\n                state_dict = pvt_convert(state_dict)\n            load_state_dict(self, state_dict, strict=False, logger=logger)\n\n    def forward(self, x):\n        outs = []\n\n        for i, layer in enumerate(self.layers):\n            x, hw_shape = layer[0](x)\n\n            for block in layer[1]:\n                x = block(x, hw_shape)\n            x = 
layer[2](x)\n            x = nlc_to_nchw(x, hw_shape)\n            if i in self.out_indices:\n                outs.append(x)\n\n        return outs\n\n\n@BACKBONES.register_module()\nclass PyramidVisionTransformerV2(PyramidVisionTransformer):\n    \"\"\"Implementation of `PVTv2: Improved Baselines with Pyramid Vision\n    Transformer <https://arxiv.org/pdf/2106.13797.pdf>`_.\"\"\"\n\n    def __init__(self, **kwargs):\n        super(PyramidVisionTransformerV2, self).__init__(\n            patch_sizes=[7, 3, 3, 3],\n            paddings=[3, 1, 1, 1],\n            use_abs_pos_embed=False,\n            norm_after_stage=True,\n            use_conv_ffn=True,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/regnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch.nn as nn\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom ..builder import BACKBONES\nfrom .resnet import ResNet\nfrom .resnext import Bottleneck\n\n\n@BACKBONES.register_module()\nclass RegNet(ResNet):\n    \"\"\"RegNet backbone.\n\n    More details can be found in `paper <https://arxiv.org/abs/2003.13678>`_ .\n\n    Args:\n        arch (dict): The parameter of RegNets.\n\n            - w0 (int): initial width\n            - wa (float): slope of width\n            - wm (float): quantization parameter to quantize the width\n            - depth (int): depth of the backbone\n            - group_w (int): width of group\n            - bot_mul (float): bottleneck ratio, i.e. expansion of bottleneck.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        base_channels (int): Base channels after stem layer.\n        in_channels (int): Number of input image channels. Default: 3.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n            not freezing any parameters.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import RegNet\n        >>> import torch\n        >>> self = RegNet(\n                arch=dict(\n                    w0=88,\n                    wa=26.31,\n                    wm=2.25,\n                    group_w=48,\n                    depth=25,\n                    bot_mul=1.0))\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     
print(tuple(level_out.shape))\n        (1, 96, 8, 8)\n        (1, 192, 4, 4)\n        (1, 432, 2, 2)\n        (1, 1008, 1, 1)\n    \"\"\"\n    arch_settings = {\n        'regnetx_400mf':\n        dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0),\n        'regnetx_800mf':\n        dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16, bot_mul=1.0),\n        'regnetx_1.6gf':\n        dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18, bot_mul=1.0),\n        'regnetx_3.2gf':\n        dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25, bot_mul=1.0),\n        'regnetx_4.0gf':\n        dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23, bot_mul=1.0),\n        'regnetx_6.4gf':\n        dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17, bot_mul=1.0),\n        'regnetx_8.0gf':\n        dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23, bot_mul=1.0),\n        'regnetx_12gf':\n        dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19, bot_mul=1.0),\n    }\n\n    def __init__(self,\n                 arch,\n                 in_channels=3,\n                 stem_channels=32,\n                 base_channels=32,\n                 strides=(2, 2, 2, 2),\n                 dilations=(1, 1, 1, 1),\n                 out_indices=(0, 1, 2, 3),\n                 style='pytorch',\n                 deep_stem=False,\n                 avg_down=False,\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 dcn=None,\n                 stage_with_dcn=(False, False, False, False),\n                 plugins=None,\n                 with_cp=False,\n                 zero_init_residual=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResNet, self).__init__(init_cfg)\n\n        # Generate RegNet parameters first\n        if isinstance(arch, str):\n            assert arch in self.arch_settings, \\\n                f'\"arch\": \"{arch}\" is not one of the' \\\n                ' arch_settings'\n            arch = self.arch_settings[arch]\n        elif not isinstance(arch, dict):\n            raise ValueError('Expect \"arch\" to be either a string '\n                             f'or a dict, got {type(arch)}')\n\n        widths, num_stages = self.generate_regnet(\n            arch['w0'],\n            arch['wa'],\n            arch['wm'],\n            arch['depth'],\n        )\n        # Convert to per stage format\n        stage_widths, stage_blocks = self.get_stages_from_blocks(widths)\n        # Generate group widths and bot muls\n        group_widths = [arch['group_w'] for _ in range(num_stages)]\n        self.bottleneck_ratio = [arch['bot_mul'] for _ in range(num_stages)]\n        # Adjust the compatibility of stage_widths and group_widths\n        stage_widths, group_widths = self.adjust_width_group(\n            stage_widths, self.bottleneck_ratio, group_widths)\n\n        # Group params by stage\n        self.stage_widths = stage_widths\n        self.group_widths = group_widths\n        self.depth = sum(stage_blocks)\n        self.stem_channels = stem_channels\n        self.base_channels = base_channels\n        self.num_stages = num_stages\n        assert num_stages >= 1 and num_stages <= 4\n        self.strides = strides\n        self.dilations = dilations\n        assert len(strides) == len(dilations) == num_stages\n        self.out_indices = out_indices\n        assert max(out_indices) < num_stages\n        self.style = style\n 
       self.deep_stem = deep_stem\n        self.avg_down = avg_down\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.with_cp = with_cp\n        self.norm_eval = norm_eval\n        self.dcn = dcn\n        self.stage_with_dcn = stage_with_dcn\n        if dcn is not None:\n            assert len(stage_with_dcn) == num_stages\n        self.plugins = plugins\n        self.zero_init_residual = zero_init_residual\n        self.block = Bottleneck\n        expansion_bak = self.block.expansion\n        self.block.expansion = 1\n        self.stage_blocks = stage_blocks[:num_stages]\n\n        self._make_stem_layer(in_channels, stem_channels)\n\n        block_init_cfg = None\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n                if self.zero_init_residual:\n                    block_init_cfg = dict(\n                        type='Constant', val=0, override=dict(name='norm3'))\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.inplanes = stem_channels\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = self.strides[i]\n            dilation = self.dilations[i]\n            group_width = self.group_widths[i]\n            width = int(round(self.stage_widths[i] * self.bottleneck_ratio[i]))\n            stage_groups = width // group_width\n\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            if self.plugins is not None:\n                stage_plugins = self.make_stage_plugins(self.plugins, i)\n            else:\n                stage_plugins = None\n\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=self.stage_widths[i],\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=self.with_cp,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                dcn=dcn,\n                plugins=stage_plugins,\n                groups=stage_groups,\n                base_width=group_width,\n                base_channels=self.stage_widths[i],\n                init_cfg=block_init_cfg)\n            self.inplanes = self.stage_widths[i]\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n        self.feat_dim = stage_widths[-1]\n        self.block.expansion = expansion_bak\n\n    def _make_stem_layer(self, in_channels, base_channels):\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            in_channels,\n            
base_channels,\n            kernel_size=3,\n            stride=2,\n            padding=1,\n            bias=False)\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, base_channels, postfix=1)\n        self.add_module(self.norm1_name, norm1)\n        self.relu = nn.ReLU(inplace=True)\n\n    def generate_regnet(self,\n                        initial_width,\n                        width_slope,\n                        width_parameter,\n                        depth,\n                        divisor=8):\n        \"\"\"Generates per block width from RegNet parameters.\n\n        Args:\n            initial_width ([int]): Initial width of the backbone\n            width_slope ([float]): Slope of the quantized linear function\n            width_parameter ([float]): Parameter used to quantize the width.\n            depth ([int]): Depth of the backbone.\n            divisor (int, optional): The divisor of channels. Defaults to 8.\n\n        Returns:\n            list, int: return a list of widths of each stage and the number \\\n                of stages\n        \"\"\"\n        assert width_slope >= 0\n        assert initial_width > 0\n        assert width_parameter > 1\n        assert initial_width % divisor == 0\n        widths_cont = np.arange(depth) * width_slope + initial_width\n        ks = np.round(\n            np.log(widths_cont / initial_width) / np.log(width_parameter))\n        widths = initial_width * np.power(width_parameter, ks)\n        widths = np.round(np.divide(widths, divisor)) * divisor\n        num_stages = len(np.unique(widths))\n        widths, widths_cont = widths.astype(int).tolist(), widths_cont.tolist()\n        return widths, num_stages\n\n    @staticmethod\n    def quantize_float(number, divisor):\n        \"\"\"Converts a float to the closest non-zero int divisible by divisor.\n\n        Args:\n            number (int): Original number to be quantized.\n            divisor (int): Divisor used to quantize the number.\n\n        Returns:\n            int: quantized number that is divisible by divisor.\n        \"\"\"\n        return int(round(number / divisor) * divisor)\n\n    def adjust_width_group(self, widths, bottleneck_ratio, groups):\n        \"\"\"Adjusts the compatibility of widths and groups.\n\n        Args:\n            widths (list[int]): Width of each stage.\n            bottleneck_ratio (list[float]): Bottleneck ratio of each stage.\n            groups (list[int]): Number of groups in each stage.\n\n        Returns:\n            tuple(list): The adjusted widths and groups of each stage.\n        \"\"\"\n        bottleneck_width = [\n            int(w * b) for w, b in zip(widths, bottleneck_ratio)\n        ]\n        groups = [min(g, w_bot) for g, w_bot in zip(groups, bottleneck_width)]\n        bottleneck_width = [\n            self.quantize_float(w_bot, g)\n            for w_bot, g in zip(bottleneck_width, groups)\n        ]\n        widths = [\n            int(w_bot / b)\n            for w_bot, b in zip(bottleneck_width, bottleneck_ratio)\n        ]\n        return widths, groups\n\n    def get_stages_from_blocks(self, widths):\n        \"\"\"Gets widths/stage_blocks of network at each stage.\n\n        Args:\n            widths (list[int]): Width in each stage.\n\n        Returns:\n            tuple(list): Width and depth of each stage.\n        \"\"\"\n        width_diff = [\n            width != width_prev\n            for width, width_prev in zip(widths + [0], [0] + widths)\n        ]\n        stage_widths = [\n            width for width, diff in zip(widths, width_diff[:-1]) if diff\n        ]\n        stage_blocks = np.diff([\n            depth for depth, diff in zip(range(len(width_diff)), width_diff)\n            if diff\n        ]).tolist()\n        return stage_widths, stage_blocks\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv1(x)\n        x = self.norm1(x)\n        x = self.relu(x)\n\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            x = res_layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/backbones/res2net.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import Sequential\n\nfrom ..builder import BACKBONES\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottle2neck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 scales=4,\n                 base_width=26,\n                 base_channels=64,\n                 stage_type='normal',\n                 **kwargs):\n        \"\"\"Bottle2neck block for Res2Net.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottle2neck, self).__init__(inplanes, planes, **kwargs)\n        assert scales > 1, 'Res2Net degenerates to ResNet when scales = 1.'\n        width = int(math.floor(self.planes * (base_width / base_channels)))\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width * scales, postfix=1)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width * scales,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n\n        if stage_type == 'stage' and self.conv2_stride != 1:\n            self.pool = nn.AvgPool2d(\n                kernel_size=3, stride=self.conv2_stride, padding=1)\n        convs = []\n        bns = []\n\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            for i in range(scales - 1):\n                convs.append(\n                    build_conv_layer(\n                        self.conv_cfg,\n                        width,\n                        width,\n                        kernel_size=3,\n                        stride=self.conv2_stride,\n                        padding=self.dilation,\n                        dilation=self.dilation,\n                        bias=False))\n                bns.append(\n                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\n            self.convs = nn.ModuleList(convs)\n            self.bns = nn.ModuleList(bns)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            for i in range(scales - 1):\n                convs.append(\n                    build_conv_layer(\n                        self.dcn,\n                        width,\n                        width,\n                        kernel_size=3,\n                        stride=self.conv2_stride,\n                        padding=self.dilation,\n                        dilation=self.dilation,\n                        bias=False))\n                bns.append(\n                    build_norm_layer(self.norm_cfg, width, postfix=i + 1)[1])\n            self.convs = nn.ModuleList(convs)\n            self.bns = nn.ModuleList(bns)\n\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width * scales,\n            self.planes * self.expansion,\n            kernel_size=1,\n            
bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        self.stage_type = stage_type\n        self.scales = scales\n        self.width = width\n        delattr(self, 'conv2')\n        delattr(self, self.norm2_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            spx = torch.split(out, self.width, 1)\n            sp = self.convs[0](spx[0].contiguous())\n            sp = self.relu(self.bns[0](sp))\n            out = sp\n            for i in range(1, self.scales - 1):\n                if self.stage_type == 'stage':\n                    sp = spx[i]\n                else:\n                    sp = sp + spx[i]\n                sp = self.convs[i](sp.contiguous())\n                sp = self.relu(self.bns[i](sp))\n                out = torch.cat((out, sp), 1)\n\n            if self.stage_type == 'normal' or self.conv2_stride == 1:\n                out = torch.cat((out, spx[self.scales - 1]), 1)\n            elif self.stage_type == 'stage':\n                out = torch.cat((out, self.pool(spx[self.scales - 1])), 1)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\nclass Res2Layer(Sequential):\n    \"\"\"Res2Layer to build Res2Net style backbone.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Default: 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottle2neck. Default: False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='BN')\n        scales (int): Scales used in Res2Net. Default: 4\n        base_width (int): Basic width of each scale. 
Default: 26\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 inplanes,\n                 planes,\n                 num_blocks,\n                 stride=1,\n                 avg_down=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 scales=4,\n                 base_width=26,\n                 **kwargs):\n        self.block = block\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = nn.Sequential(\n                nn.AvgPool2d(\n                    kernel_size=stride,\n                    stride=stride,\n                    ceil_mode=True,\n                    count_include_pad=False),\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=1,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1],\n            )\n\n        layers = []\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride,\n                downsample=downsample,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                scales=scales,\n                base_width=base_width,\n                stage_type='stage',\n                **kwargs))\n        inplanes = planes * block.expansion\n        for i in range(1, num_blocks):\n            layers.append(\n                block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    scales=scales,\n                    base_width=base_width,\n                    **kwargs))\n        super(Res2Layer, self).__init__(*layers)\n\n\n@BACKBONES.register_module()\nclass Res2Net(ResNet):\n    \"\"\"Res2Net backbone.\n\n    Args:\n        scales (int): Scales used in Res2Net. Default: 4\n        base_width (int): Basic width of each scale. Default: 26\n        depth (int): Depth of res2net, from {50, 101, 152}.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Res2net stages. Default: 4.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottle2neck.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). 
Note: Effect on Batch Norm\n            and its variants only.\n        plugins (list[dict]): List of plugins for stages, each dict contains:\n\n            - cfg (dict, required): Cfg dict to build plugin.\n            - position (str, required): Position inside block to insert\n              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\n            - stages (tuple[bool], optional): Stages to apply plugin, length\n              should be same as 'num_stages'.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import Res2Net\n        >>> import torch\n        >>> self = Res2Net(depth=50, scales=4, base_width=26)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 256, 8, 8)\n        (1, 512, 4, 4)\n        (1, 1024, 2, 2)\n        (1, 2048, 1, 1)\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottle2neck, (3, 4, 6, 3)),\n        101: (Bottle2neck, (3, 4, 23, 3)),\n        152: (Bottle2neck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 scales=4,\n                 base_width=26,\n                 style='pytorch',\n                 deep_stem=True,\n                 avg_down=True,\n                 pretrained=None,\n                 init_cfg=None,\n                 **kwargs):\n        self.scales = scales\n        self.base_width = base_width\n        super(Res2Net, self).__init__(\n            style='pytorch',\n            deep_stem=True,\n            avg_down=True,\n            pretrained=pretrained,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def make_res_layer(self, **kwargs):\n        return Res2Layer(\n            scales=self.scales,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/resnest.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNetV1d\n\n\nclass RSoftmax(nn.Module):\n    \"\"\"Radix Softmax module in ``SplitAttentionConv2d``.\n\n    Args:\n        radix (int): Radix of input.\n        groups (int): Groups of input.\n    \"\"\"\n\n    def __init__(self, radix, groups):\n        super().__init__()\n        self.radix = radix\n        self.groups = groups\n\n    def forward(self, x):\n        batch = x.size(0)\n        if self.radix > 1:\n            x = x.view(batch, self.groups, self.radix, -1).transpose(1, 2)\n            x = F.softmax(x, dim=1)\n            x = x.reshape(batch, -1)\n        else:\n            x = torch.sigmoid(x)\n        return x\n\n\nclass SplitAttentionConv2d(BaseModule):\n    \"\"\"Split-Attention Conv2d in ResNeSt.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        channels (int): Number of intermediate channels.\n        kernel_size (int | tuple[int]): Size of the convolution kernel.\n        stride (int | tuple[int]): Stride of the convolution.\n        padding (int | tuple[int]): Zero-padding added to both sides of\n        dilation (int | tuple[int]): Spacing between kernel elements.\n        groups (int): Number of blocked connections from input channels to\n            output channels.\n        groups (int): Same as nn.Conv2d.\n        radix (int): Radix of SpltAtConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels. Default: 4.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        dcn (dict): Config dict for DCN. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 channels,\n                 kernel_size,\n                 stride=1,\n                 padding=0,\n                 dilation=1,\n                 groups=1,\n                 radix=2,\n                 reduction_factor=4,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 init_cfg=None):\n        super(SplitAttentionConv2d, self).__init__(init_cfg)\n        inter_channels = max(in_channels * radix // reduction_factor, 32)\n        self.radix = radix\n        self.groups = groups\n        self.channels = channels\n        self.with_dcn = dcn is not None\n        self.dcn = dcn\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if self.with_dcn and not fallback_on_stride:\n            assert conv_cfg is None, 'conv_cfg must be None for DCN'\n            conv_cfg = dcn\n        self.conv = build_conv_layer(\n            conv_cfg,\n            in_channels,\n            channels * radix,\n            kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            groups=groups * radix,\n            bias=False)\n        # To be consistent with original implementation, starting from 0\n        self.norm0_name, norm0 = build_norm_layer(\n            norm_cfg, channels * radix, postfix=0)\n        self.add_module(self.norm0_name, norm0)\n        self.relu = nn.ReLU(inplace=True)\n        self.fc1 = build_conv_layer(\n            None, channels, inter_channels, 1, groups=self.groups)\n        self.norm1_name, norm1 = build_norm_layer(\n            norm_cfg, inter_channels, postfix=1)\n        self.add_module(self.norm1_name, norm1)\n        self.fc2 = build_conv_layer(\n            None, inter_channels, channels * radix, 1, groups=self.groups)\n        self.rsoftmax = RSoftmax(radix, groups)\n\n    @property\n    def norm0(self):\n        \"\"\"nn.Module: the normalization layer named \"norm0\" \"\"\"\n        return getattr(self, self.norm0_name)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    def forward(self, x):\n        x = self.conv(x)\n        x = self.norm0(x)\n        x = self.relu(x)\n\n        batch, rchannel = x.shape[:2]\n        batch = x.size(0)\n        if self.radix > 1:\n            splits = x.view(batch, self.radix, -1, *x.shape[2:])\n            gap = splits.sum(dim=1)\n        else:\n            gap = x\n        gap = F.adaptive_avg_pool2d(gap, 1)\n        gap = self.fc1(gap)\n\n        gap = self.norm1(gap)\n        gap = self.relu(gap)\n\n        atten = self.fc2(gap)\n        atten = self.rsoftmax(atten).view(batch, -1, 1, 1)\n\n        if self.radix > 1:\n            attens = atten.view(batch, self.radix, -1, *atten.shape[2:])\n            out = torch.sum(attens * splits, dim=1)\n        else:\n            out = atten * x\n        return out.contiguous()\n\n\nclass Bottleneck(_Bottleneck):\n    \"\"\"Bottleneck block for ResNeSt.\n\n    Args:\n        inplane (int): Input planes of this block.\n        planes (int): Middle planes of this block.\n        groups (int): Groups of conv2.\n        base_width (int): Base of width in terms of base 
channels. Default: 4.\n        base_channels (int): Base of channels for calculating width.\n            Default: 64.\n        radix (int): Radix of SpltAtConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels in\n            SplitAttentionConv2d. Default: 4.\n        avg_down_stride (bool): Whether to use average pool for stride in\n            Bottleneck. Default: True.\n        kwargs (dict): Key word arguments for base class.\n    \"\"\"\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 radix=2,\n                 reduction_factor=4,\n                 avg_down_stride=True,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeSt.\"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.avg_down_stride = avg_down_stride and self.conv2_stride > 1\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        self.with_modulated_dcn = False\n        self.conv2 = SplitAttentionConv2d(\n            width,\n            width,\n            kernel_size=3,\n            stride=1 if self.avg_down_stride else self.conv2_stride,\n            padding=self.dilation,\n            dilation=self.dilation,\n            groups=groups,\n            radix=radix,\n            reduction_factor=reduction_factor,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            dcn=self.dcn)\n        delattr(self, self.norm2_name)\n\n        if self.avg_down_stride:\n            self.avd_layer = nn.AvgPool2d(3, self.conv2_stride, padding=1)\n\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n\n            if self.avg_down_stride:\n                out = self.avd_layer(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        
else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\n@BACKBONES.register_module()\nclass ResNeSt(ResNetV1d):\n    \"\"\"ResNeSt backbone.\n\n    Args:\n        groups (int): Number of groups of Bottleneck. Default: 1\n        base_width (int): Base width of Bottleneck. Default: 4\n        radix (int): Radix of SplitAttentionConv2d. Default: 2\n        reduction_factor (int): Reduction factor of inter_channels in\n            SplitAttentionConv2d. Default: 4.\n        avg_down_stride (bool): Whether to use average pool for stride in\n            Bottleneck. Default: True.\n        kwargs (dict): Keyword arguments for ResNet.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3)),\n        200: (Bottleneck, (3, 24, 36, 3))\n    }\n\n    def __init__(self,\n                 groups=1,\n                 base_width=4,\n                 radix=2,\n                 reduction_factor=4,\n                 avg_down_stride=True,\n                 **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        self.radix = radix\n        self.reduction_factor = reduction_factor\n        self.avg_down_stride = avg_down_stride\n        super(ResNeSt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n        return ResLayer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            radix=self.radix,\n            reduction_factor=self.reduction_factor,\n            avg_down_stride=self.avg_down_stride,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.batchnorm import _BatchNorm\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\n\n\nclass BasicBlock(BaseModule):\n    expansion = 1\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 stride=1,\n                 dilation=1,\n                 downsample=None,\n                 style='pytorch',\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 plugins=None,\n                 init_cfg=None):\n        super(BasicBlock, self).__init__(init_cfg)\n        assert dcn is None, 'Not implemented yet.'\n        assert plugins is None, 'Not implemented yet.'\n\n        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            3,\n            stride=stride,\n            padding=dilation,\n            dilation=dilation,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            conv_cfg, planes, planes, 3, padding=1, bias=False)\n        self.add_module(self.norm2_name, norm2)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.dilation = dilation\n        self.with_cp = with_cp\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\nclass Bottleneck(BaseModule):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 stride=1,\n                 dilation=1,\n                 downsample=None,\n                 style='pytorch',\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 plugins=None,\n                 init_cfg=None):\n        \"\"\"Bottleneck block for ResNet.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(init_cfg)\n        assert style in 
['pytorch', 'caffe']\n        assert dcn is None or isinstance(dcn, dict)\n        assert plugins is None or isinstance(plugins, list)\n        if plugins is not None:\n            allowed_position = ['after_conv1', 'after_conv2', 'after_conv3']\n            assert all(p['position'] in allowed_position for p in plugins)\n\n        self.inplanes = inplanes\n        self.planes = planes\n        self.stride = stride\n        self.dilation = dilation\n        self.style = style\n        self.with_cp = with_cp\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.dcn = dcn\n        self.with_dcn = dcn is not None\n        self.plugins = plugins\n        self.with_plugins = plugins is not None\n\n        if self.with_plugins:\n            # collect plugins for conv1/conv2/conv3\n            self.after_conv1_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv1'\n            ]\n            self.after_conv2_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv2'\n            ]\n            self.after_conv3_plugins = [\n                plugin['cfg'] for plugin in plugins\n                if plugin['position'] == 'after_conv3'\n            ]\n\n        if self.style == 'pytorch':\n            self.conv1_stride = 1\n            self.conv2_stride = stride\n        else:\n            self.conv1_stride = stride\n            self.conv2_stride = 1\n\n        self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(norm_cfg, planes, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            norm_cfg, planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        if self.with_dcn:\n            fallback_on_stride = dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                conv_cfg,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=dilation,\n                dilation=dilation,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                dcn,\n                planes,\n                planes,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=dilation,\n                dilation=dilation,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            conv_cfg,\n            planes,\n            planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n\n        if self.with_plugins:\n            self.after_conv1_plugin_names = self.make_block_plugins(\n                planes, self.after_conv1_plugins)\n            self.after_conv2_plugin_names = self.make_block_plugins(\n                planes, self.after_conv2_plugins)\n            
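# Plugins attached after conv3 act on the expanded output width\n            # (planes * self.expansion) rather than the bottleneck width.\n            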
self.after_conv3_plugin_names = self.make_block_plugins(\n                planes * self.expansion, self.after_conv3_plugins)\n\n    def make_block_plugins(self, in_channels, plugins):\n        \"\"\"make plugins for block.\n\n        Args:\n            in_channels (int): Input channels of plugin.\n            plugins (list[dict]): List of plugins cfg to build.\n\n        Returns:\n            list[str]: List of the names of plugin.\n        \"\"\"\n        assert isinstance(plugins, list)\n        plugin_names = []\n        for plugin in plugins:\n            plugin = plugin.copy()\n            name, layer = build_plugin_layer(\n                plugin,\n                in_channels=in_channels,\n                postfix=plugin.pop('postfix', ''))\n            assert not hasattr(self, name), f'duplicate plugin {name}'\n            self.add_module(name, layer)\n            plugin_names.append(name)\n        return plugin_names\n\n    def forward_plugin(self, x, plugin_names):\n        out = x\n        for name in plugin_names:\n            out = getattr(self, name)(out)\n        return out\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name)\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name)\n\n    @property\n    def norm3(self):\n        \"\"\"nn.Module: normalization layer after the third convolution layer\"\"\"\n        return getattr(self, self.norm3_name)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        def _inner_forward(x):\n            identity = x\n            out = self.conv1(x)\n            out = self.norm1(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = self.norm2(out)\n            out = self.relu(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv2_plugin_names)\n\n            out = self.conv3(out)\n            out = self.norm3(out)\n\n            if self.with_plugins:\n                out = self.forward_plugin(out, self.after_conv3_plugin_names)\n\n            if self.downsample is not None:\n                identity = self.downsample(x)\n\n            out += identity\n\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = self.relu(out)\n\n        return out\n\n\n@BACKBONES.register_module()\nclass ResNet(BaseModule):\n    \"\"\"ResNet backbone.\n\n    Args:\n        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n        stem_channels (int | None): Number of stem channels. If not specified,\n            it will be the same as `base_channels`. Default: None.\n        base_channels (int): Number of base channels of res layer. Default: 64.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Resnet stages. Default: 4.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. 
If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        deep_stem (bool): Replace 7x7 conv in input stem with 3 3x3 conv\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        plugins (list[dict]): List of plugins for stages, each dict contains:\n\n            - cfg (dict, required): Cfg dict to build plugin.\n            - position (str, required): Position inside block to insert\n              plugin, options are 'after_conv1', 'after_conv2', 'after_conv3'.\n            - stages (tuple[bool], optional): Stages to apply plugin, length\n              should be same as 'num_stages'.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): Whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Example:\n        >>> from mmdet.models import ResNet\n        >>> import torch\n        >>> self = ResNet(depth=18)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 32, 32)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     
print(tuple(level_out.shape))\n        (1, 64, 8, 8)\n        (1, 128, 4, 4)\n        (1, 256, 2, 2)\n        (1, 512, 1, 1)\n    \"\"\"\n\n    arch_settings = {\n        18: (BasicBlock, (2, 2, 2, 2)),\n        34: (BasicBlock, (3, 4, 6, 3)),\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self,\n                 depth,\n                 in_channels=3,\n                 stem_channels=None,\n                 base_channels=64,\n                 num_stages=4,\n                 strides=(1, 2, 2, 2),\n                 dilations=(1, 1, 1, 1),\n                 out_indices=(0, 1, 2, 3),\n                 style='pytorch',\n                 deep_stem=False,\n                 avg_down=False,\n                 frozen_stages=-1,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 dcn=None,\n                 stage_with_dcn=(False, False, False, False),\n                 plugins=None,\n                 with_cp=False,\n                 zero_init_residual=True,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResNet, self).__init__(init_cfg)\n        self.zero_init_residual = zero_init_residual\n        if depth not in self.arch_settings:\n            raise KeyError(f'invalid depth {depth} for resnet')\n\n        block_init_cfg = None\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n                block = self.arch_settings[depth][0]\n                if self.zero_init_residual:\n                    if block is BasicBlock:\n                        block_init_cfg = dict(\n                            type='Constant',\n                            val=0,\n                            override=dict(name='norm2'))\n                    elif block is Bottleneck:\n                        block_init_cfg = dict(\n                            type='Constant',\n                            val=0,\n                            override=dict(name='norm3'))\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        self.depth = depth\n        if stem_channels is None:\n            stem_channels = base_channels\n        self.stem_channels = stem_channels\n        self.base_channels = base_channels\n        self.num_stages = num_stages\n        assert num_stages >= 1 and num_stages <= 4\n        self.strides = strides\n        self.dilations = dilations\n        assert len(strides) == len(dilations) == num_stages\n        self.out_indices = out_indices\n        assert max(out_indices) < num_stages\n        self.style = style\n        self.deep_stem = deep_stem\n        self.avg_down = avg_down\n        self.frozen_stages = frozen_stages\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = 
norm_cfg\n        self.with_cp = with_cp\n        self.norm_eval = norm_eval\n        self.dcn = dcn\n        self.stage_with_dcn = stage_with_dcn\n        if dcn is not None:\n            assert len(stage_with_dcn) == num_stages\n        self.plugins = plugins\n        self.block, stage_blocks = self.arch_settings[depth]\n        self.stage_blocks = stage_blocks[:num_stages]\n        self.inplanes = stem_channels\n\n        self._make_stem_layer(in_channels, stem_channels)\n\n        self.res_layers = []\n        for i, num_blocks in enumerate(self.stage_blocks):\n            stride = strides[i]\n            dilation = dilations[i]\n            dcn = self.dcn if self.stage_with_dcn[i] else None\n            if plugins is not None:\n                stage_plugins = self.make_stage_plugins(plugins, i)\n            else:\n                stage_plugins = None\n            planes = base_channels * 2**i\n            res_layer = self.make_res_layer(\n                block=self.block,\n                inplanes=self.inplanes,\n                planes=planes,\n                num_blocks=num_blocks,\n                stride=stride,\n                dilation=dilation,\n                style=self.style,\n                avg_down=self.avg_down,\n                with_cp=with_cp,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                dcn=dcn,\n                plugins=stage_plugins,\n                init_cfg=block_init_cfg)\n            self.inplanes = planes * self.block.expansion\n            layer_name = f'layer{i + 1}'\n            self.add_module(layer_name, res_layer)\n            self.res_layers.append(layer_name)\n\n        self._freeze_stages()\n\n        self.feat_dim = self.block.expansion * base_channels * 2**(\n            len(self.stage_blocks) - 1)\n\n    def make_stage_plugins(self, plugins, stage_idx):\n        \"\"\"Make plugins for ResNet ``stage_idx`` th stage.\n\n        Currently we support to insert ``context_block``,\n        ``empirical_attention_block``, ``nonlocal_block`` into the backbone\n        like ResNet/ResNeXt. They could be inserted after conv1/conv2/conv3 of\n        Bottleneck.\n\n        An example of plugins format could be:\n\n        Examples:\n            >>> plugins=[\n            ...     dict(cfg=dict(type='xxx', arg1='xxx'),\n            ...          stages=(False, True, True, True),\n            ...          position='after_conv2'),\n            ...     dict(cfg=dict(type='yyy'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3'),\n            ...     dict(cfg=dict(type='zzz', postfix='1'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3'),\n            ...     dict(cfg=dict(type='zzz', postfix='2'),\n            ...          stages=(True, True, True, True),\n            ...          position='after_conv3')\n            ... ]\n            >>> self = ResNet(depth=18)\n            >>> stage_plugins = self.make_stage_plugins(plugins, 0)\n            >>> assert len(stage_plugins) == 3\n\n        Suppose ``stage_idx=0``, the structure of blocks in the stage would be:\n\n        .. code-block:: none\n\n            conv1-> conv2->conv3->yyy->zzz1->zzz2\n\n        Suppose 'stage_idx=1', the structure of blocks in the stage would be:\n\n        .. 
code-block:: none\n\n            conv1-> conv2->xxx->conv3->yyy->zzz1->zzz2\n\n        If stages is missing, the plugin would be applied to all stages.\n\n        Args:\n            plugins (list[dict]): List of plugins cfg to build. The postfix is\n                required if multiple same type plugins are inserted.\n            stage_idx (int): Index of stage to build\n\n        Returns:\n            list[dict]: Plugins for current stage\n        \"\"\"\n        stage_plugins = []\n        for plugin in plugins:\n            plugin = plugin.copy()\n            stages = plugin.pop('stages', None)\n            assert stages is None or len(stages) == self.num_stages\n            # whether to insert plugin into current stage\n            if stages is None or stages[stage_idx]:\n                stage_plugins.append(plugin)\n\n        return stage_plugins\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``.\"\"\"\n        return ResLayer(**kwargs)\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: the normalization layer named \"norm1\" \"\"\"\n        return getattr(self, self.norm1_name)\n\n    def _make_stem_layer(self, in_channels, stem_channels):\n        if self.deep_stem:\n            self.stem = nn.Sequential(\n                build_conv_layer(\n                    self.conv_cfg,\n                    in_channels,\n                    stem_channels // 2,\n                    kernel_size=3,\n                    stride=2,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n                nn.ReLU(inplace=True),\n                build_conv_layer(\n                    self.conv_cfg,\n                    stem_channels // 2,\n                    stem_channels // 2,\n                    kernel_size=3,\n                    stride=1,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels // 2)[1],\n                nn.ReLU(inplace=True),\n                build_conv_layer(\n                    self.conv_cfg,\n                    stem_channels // 2,\n                    stem_channels,\n                    kernel_size=3,\n                    stride=1,\n                    padding=1,\n                    bias=False),\n                build_norm_layer(self.norm_cfg, stem_channels)[1],\n                nn.ReLU(inplace=True))\n        else:\n            self.conv1 = build_conv_layer(\n                self.conv_cfg,\n                in_channels,\n                stem_channels,\n                kernel_size=7,\n                stride=2,\n                padding=3,\n                bias=False)\n            self.norm1_name, norm1 = build_norm_layer(\n                self.norm_cfg, stem_channels, postfix=1)\n            self.add_module(self.norm1_name, norm1)\n            self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            if self.deep_stem:\n                self.stem.eval()\n                for param in self.stem.parameters():\n                    param.requires_grad = False\n            else:\n                self.norm1.eval()\n                for m in [self.conv1, self.norm1]:\n                    for param in m.parameters():\n                        param.requires_grad = False\n\n        for i in range(1, self.frozen_stages + 1):\n            m 
= getattr(self, f'layer{i}')\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        if self.deep_stem:\n            x = self.stem(x)\n        else:\n            x = self.conv1(x)\n            x = self.norm1(x)\n            x = self.relu(x)\n        x = self.maxpool(x)\n        outs = []\n        for i, layer_name in enumerate(self.res_layers):\n            res_layer = getattr(self, layer_name)\n            x = res_layer(x)\n            if i in self.out_indices:\n                outs.append(x)\n        return tuple(outs)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keeping the normalization\n        layers frozen.\"\"\"\n        super(ResNet, self).train(mode)\n        self._freeze_stages()\n        if mode and self.norm_eval:\n            for m in self.modules():\n                # trick: eval has effect on BatchNorm only\n                if isinstance(m, _BatchNorm):\n                    m.eval()\n\n\n@BACKBONES.register_module()\nclass ResNetV1d(ResNet):\n    r\"\"\"ResNetV1d variant described in `Bag of Tricks\n    <https://arxiv.org/pdf/1812.01187.pdf>`_.\n\n    Compared with default ResNet (ResNetV1b), ResNetV1d replaces the 7x7 conv\n    in the input stem with three 3x3 convs. In the downsampling block, a 2x2\n    avg_pool with stride 2 is added before the conv, whose stride is changed\n    to 1.\n    \"\"\"\n\n    def __init__(self, **kwargs):\n        super(ResNetV1d, self).__init__(\n            deep_stem=True, avg_down=True, **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/resnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfrom ..builder import BACKBONES\nfrom ..utils import ResLayer\nfrom .resnet import Bottleneck as _Bottleneck\nfrom .resnet import ResNet\n\n\nclass Bottleneck(_Bottleneck):\n    expansion = 4\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 groups=1,\n                 base_width=4,\n                 base_channels=64,\n                 **kwargs):\n        \"\"\"Bottleneck block for ResNeXt.\n\n        If style is \"pytorch\", the stride-two layer is the 3x3 conv layer, if\n        it is \"caffe\", the stride-two layer is the first 1x1 conv layer.\n        \"\"\"\n        super(Bottleneck, self).__init__(inplanes, planes, **kwargs)\n\n        if groups == 1:\n            width = self.planes\n        else:\n            width = math.floor(self.planes *\n                               (base_width / base_channels)) * groups\n\n        self.norm1_name, norm1 = build_norm_layer(\n            self.norm_cfg, width, postfix=1)\n        self.norm2_name, norm2 = build_norm_layer(\n            self.norm_cfg, width, postfix=2)\n        self.norm3_name, norm3 = build_norm_layer(\n            self.norm_cfg, self.planes * self.expansion, postfix=3)\n\n        self.conv1 = build_conv_layer(\n            self.conv_cfg,\n            self.inplanes,\n            width,\n            kernel_size=1,\n            stride=self.conv1_stride,\n            bias=False)\n        self.add_module(self.norm1_name, norm1)\n        fallback_on_stride = False\n        self.with_modulated_dcn = False\n        if self.with_dcn:\n            fallback_on_stride = self.dcn.pop('fallback_on_stride', False)\n        if not self.with_dcn or fallback_on_stride:\n            self.conv2 = build_conv_layer(\n                self.conv_cfg,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n        else:\n            assert self.conv_cfg is None, 'conv_cfg must be None for DCN'\n            self.conv2 = build_conv_layer(\n                self.dcn,\n                width,\n                width,\n                kernel_size=3,\n                stride=self.conv2_stride,\n                padding=self.dilation,\n                dilation=self.dilation,\n                groups=groups,\n                bias=False)\n\n        self.add_module(self.norm2_name, norm2)\n        self.conv3 = build_conv_layer(\n            self.conv_cfg,\n            width,\n            self.planes * self.expansion,\n            kernel_size=1,\n            bias=False)\n        self.add_module(self.norm3_name, norm3)\n\n        if self.with_plugins:\n            self._del_block_plugins(self.after_conv1_plugin_names +\n                                    self.after_conv2_plugin_names +\n                                    self.after_conv3_plugin_names)\n            self.after_conv1_plugin_names = self.make_block_plugins(\n                width, self.after_conv1_plugins)\n            self.after_conv2_plugin_names = self.make_block_plugins(\n                width, self.after_conv2_plugins)\n            self.after_conv3_plugin_names = self.make_block_plugins(\n                self.planes * self.expansion, self.after_conv3_plugins)\n\n    def _del_block_plugins(self, plugin_names):\n 
       \"\"\"delete plugins for block if exist.\n\n        Args:\n            plugin_names (list[str]): List of plugins name to delete.\n        \"\"\"\n        assert isinstance(plugin_names, list)\n        for plugin_name in plugin_names:\n            del self._modules[plugin_name]\n\n\n@BACKBONES.register_module()\nclass ResNeXt(ResNet):\n    \"\"\"ResNeXt backbone.\n\n    Args:\n        depth (int): Depth of resnet, from {18, 34, 50, 101, 152}.\n        in_channels (int): Number of input image channels. Default: 3.\n        num_stages (int): Resnet stages. Default: 4.\n        groups (int): Group of resnext.\n        base_width (int): Base width of resnext.\n        strides (Sequence[int]): Strides of the first block of each stage.\n        dilations (Sequence[int]): Dilation of each stage.\n        out_indices (Sequence[int]): Output from which stages.\n        style (str): `pytorch` or `caffe`. If set to \"pytorch\", the stride-two\n            layer is the 3x3 conv layer, otherwise the stride-two layer is\n            the first 1x1 conv layer.\n        frozen_stages (int): Stages to be frozen (all param fixed). -1 means\n            not freezing any parameters.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        norm_eval (bool): Whether to set norm layers to eval mode, namely,\n            freeze running stats (mean and var). Note: Effect on Batch Norm\n            and its variants only.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        zero_init_residual (bool): whether to use zero init for last norm layer\n            in resblocks to let them behave as identity.\n    \"\"\"\n\n    arch_settings = {\n        50: (Bottleneck, (3, 4, 6, 3)),\n        101: (Bottleneck, (3, 4, 23, 3)),\n        152: (Bottleneck, (3, 8, 36, 3))\n    }\n\n    def __init__(self, groups=1, base_width=4, **kwargs):\n        self.groups = groups\n        self.base_width = base_width\n        super(ResNeXt, self).__init__(**kwargs)\n\n    def make_res_layer(self, **kwargs):\n        \"\"\"Pack all blocks in a stage into a ``ResLayer``\"\"\"\n        return ResLayer(\n            groups=self.groups,\n            base_width=self.base_width,\n            base_channels=self.base_channels,\n            **kwargs)\n"
  },
  {
    "path": "mmdet/models/backbones/ssd_vgg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import VGG\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import BACKBONES\nfrom ..necks import ssd_neck\n\n\n@BACKBONES.register_module()\nclass SSDVGG(VGG, BaseModule):\n    \"\"\"VGG Backbone network for single-shot-detection.\n\n    Args:\n        depth (int): Depth of vgg, from {11, 13, 16, 19}.\n        with_last_pool (bool): Whether to add a pooling layer at the last\n            of the model\n        ceil_mode (bool): When True, will use `ceil` instead of `floor`\n            to compute the output shape.\n        out_indices (Sequence[int]): Output from which stages.\n        out_feature_indices (Sequence[int]): Output from which feature map.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n        input_size (int, optional): Deprecated argumment.\n            Width and height of input, from {300, 512}.\n        l2_norm_scale (float, optional) : Deprecated argumment.\n            L2 normalization layer init scale.\n\n    Example:\n        >>> self = SSDVGG(input_size=300, depth=11)\n        >>> self.eval()\n        >>> inputs = torch.rand(1, 3, 300, 300)\n        >>> level_outputs = self.forward(inputs)\n        >>> for level_out in level_outputs:\n        ...     print(tuple(level_out.shape))\n        (1, 1024, 19, 19)\n        (1, 512, 10, 10)\n        (1, 256, 5, 5)\n        (1, 256, 3, 3)\n        (1, 256, 1, 1)\n    \"\"\"\n    extra_setting = {\n        300: (256, 'S', 512, 128, 'S', 256, 128, 256, 128, 256),\n        512: (256, 'S', 512, 128, 'S', 256, 128, 'S', 256, 128, 'S', 256, 128),\n    }\n\n    def __init__(self,\n                 depth,\n                 with_last_pool=False,\n                 ceil_mode=True,\n                 out_indices=(3, 4),\n                 out_feature_indices=(22, 34),\n                 pretrained=None,\n                 init_cfg=None,\n                 input_size=None,\n                 l2_norm_scale=None):\n        # TODO: in_channels for mmcv.VGG\n        super(SSDVGG, self).__init__(\n            depth,\n            with_last_pool=with_last_pool,\n            ceil_mode=ceil_mode,\n            out_indices=out_indices)\n\n        self.features.add_module(\n            str(len(self.features)),\n            nn.MaxPool2d(kernel_size=3, stride=1, padding=1))\n        self.features.add_module(\n            str(len(self.features)),\n            nn.Conv2d(512, 1024, kernel_size=3, padding=6, dilation=6))\n        self.features.add_module(\n            str(len(self.features)), nn.ReLU(inplace=True))\n        self.features.add_module(\n            str(len(self.features)), nn.Conv2d(1024, 1024, kernel_size=1))\n        self.features.add_module(\n            str(len(self.features)), nn.ReLU(inplace=True))\n        self.out_feature_indices = out_feature_indices\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n\n        if init_cfg is not None:\n            self.init_cfg = init_cfg\n        elif isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = [\n                
dict(type='Kaiming', layer='Conv2d'),\n                dict(type='Constant', val=1, layer='BatchNorm2d'),\n                dict(type='Normal', std=0.01, layer='Linear'),\n            ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        if input_size is not None:\n            warnings.warn('DeprecationWarning: input_size is deprecated')\n        if l2_norm_scale is not None:\n            warnings.warn('DeprecationWarning: l2_norm_scale in VGG is '\n                          'deprecated, it has been moved to SSDNeck.')\n\n    def init_weights(self, pretrained=None):\n        super(VGG, self).init_weights()\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = []\n        for i, layer in enumerate(self.features):\n            x = layer(x)\n            if i in self.out_feature_indices:\n                outs.append(x)\n\n        if len(outs) == 1:\n            return outs[0]\n        else:\n            return tuple(outs)\n\n\nclass L2Norm(ssd_neck.L2Norm):\n\n    def __init__(self, **kwargs):\n        super(L2Norm, self).__init__(**kwargs)\n        warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py '\n                      'is deprecated, please use L2Norm in '\n                      'mmdet/models/necks/ssd_neck.py instead')\n"
  },
  {
    "path": "mmdet/models/backbones/swin.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom collections import OrderedDict\nfrom copy import deepcopy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_norm_layer, constant_init, trunc_normal_init\nfrom mmcv.cnn.bricks.transformer import FFN, build_dropout\nfrom mmcv.cnn.utils.weight_init import trunc_normal_\nfrom mmcv.runner import BaseModule, ModuleList, _load_checkpoint\nfrom mmcv.utils import to_2tuple\n\nfrom ...utils import get_root_logger\nfrom ..builder import BACKBONES\nfrom ..utils.ckpt_convert import swin_converter\nfrom ..utils.transformer import PatchEmbed, PatchMerging\n\n\nclass WindowMSA(BaseModule):\n    \"\"\"Window based multi-head self-attention (W-MSA) module with relative\n    position bias.\n\n    Args:\n        embed_dims (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (tuple[int]): The height and width of the window.\n        qkv_bias (bool, optional):  If True, add a learnable bias to q, k, v.\n            Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        attn_drop_rate (float, optional): Dropout ratio of attention weight.\n            Default: 0.0\n        proj_drop_rate (float, optional): Dropout ratio of output. Default: 0.\n        init_cfg (dict | None, optional): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 window_size,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 attn_drop_rate=0.,\n                 proj_drop_rate=0.,\n                 init_cfg=None):\n\n        super().__init__()\n        self.embed_dims = embed_dims\n        self.window_size = window_size  # Wh, Ww\n        self.num_heads = num_heads\n        head_embed_dims = embed_dims // num_heads\n        self.scale = qk_scale or head_embed_dims**-0.5\n        self.init_cfg = init_cfg\n\n        # define a parameter table of relative position bias\n        self.relative_position_bias_table = nn.Parameter(\n            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1),\n                        num_heads))  # 2*Wh-1 * 2*Ww-1, nH\n\n        # About 2x faster than original impl\n        Wh, Ww = self.window_size\n        rel_index_coords = self.double_step_seq(2 * Ww - 1, Wh, 1, Ww)\n        rel_position_index = rel_index_coords + rel_index_coords.T\n        rel_position_index = rel_position_index.flip(1).contiguous()\n        self.register_buffer('relative_position_index', rel_position_index)\n\n        self.qkv = nn.Linear(embed_dims, embed_dims * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop_rate)\n        self.proj = nn.Linear(embed_dims, embed_dims)\n        self.proj_drop = nn.Dropout(proj_drop_rate)\n\n        self.softmax = nn.Softmax(dim=-1)\n\n    def init_weights(self):\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n\n    def forward(self, x, mask=None):\n        \"\"\"\n        Args:\n\n            x (tensor): input features with shape of (num_windows*B, N, C)\n            mask (tensor | None, Optional): mask with shape of (num_windows,\n                Wh*Ww, Wh*Ww), value should be between (-inf, 0].\n        \"\"\"\n        B, N, C = x.shape\n        qkv = self.qkv(x).reshape(B, N, 3, 
self.num_heads,\n                                  C // self.num_heads).permute(2, 0, 3, 1, 4)\n        # make torchscript happy (cannot use tensor as tuple)\n        q, k, v = qkv[0], qkv[1], qkv[2]\n\n        q = q * self.scale\n        attn = (q @ k.transpose(-2, -1))\n\n        relative_position_bias = self.relative_position_bias_table[\n            self.relative_position_index.view(-1)].view(\n                self.window_size[0] * self.window_size[1],\n                self.window_size[0] * self.window_size[1],\n                -1)  # Wh*Ww,Wh*Ww,nH\n        relative_position_bias = relative_position_bias.permute(\n            2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww\n        attn = attn + relative_position_bias.unsqueeze(0)\n\n        if mask is not None:\n            nW = mask.shape[0]\n            attn = attn.view(B // nW, nW, self.num_heads, N,\n                             N) + mask.unsqueeze(1).unsqueeze(0)\n            attn = attn.view(-1, self.num_heads, N, N)\n        attn = self.softmax(attn)\n\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n        return x\n\n    @staticmethod\n    def double_step_seq(step1, len1, step2, len2):\n        seq1 = torch.arange(0, step1 * len1, step1)\n        seq2 = torch.arange(0, step2 * len2, step2)\n        return (seq1[:, None] + seq2[None, :]).reshape(1, -1)\n\n\nclass ShiftWindowMSA(BaseModule):\n    \"\"\"Shifted Window Multihead Self-Attention Module.\n\n    Args:\n        embed_dims (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (int): The height and width of the window.\n        shift_size (int, optional): The shift step of each window towards\n            right-bottom. If zero, act as regular window-msa. Defaults to 0.\n        qkv_bias (bool, optional): If True, add a learnable bias to q, k, v.\n            Default: True\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. 
Defaults: None.\n        attn_drop_rate (float, optional): Dropout ratio of attention weight.\n            Defaults: 0.\n        proj_drop_rate (float, optional): Dropout ratio of output.\n            Defaults: 0.\n        dropout_layer (dict, optional): The dropout_layer used before output.\n            Defaults: dict(type='DropPath', drop_prob=0.).\n        init_cfg (dict, optional): The extra config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 window_size,\n                 shift_size=0,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 attn_drop_rate=0,\n                 proj_drop_rate=0,\n                 dropout_layer=dict(type='DropPath', drop_prob=0.),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n\n        self.window_size = window_size\n        self.shift_size = shift_size\n        assert 0 <= self.shift_size < self.window_size\n\n        self.w_msa = WindowMSA(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            window_size=to_2tuple(window_size),\n            qkv_bias=qkv_bias,\n            qk_scale=qk_scale,\n            attn_drop_rate=attn_drop_rate,\n            proj_drop_rate=proj_drop_rate,\n            init_cfg=None)\n\n        self.drop = build_dropout(dropout_layer)\n\n    def forward(self, query, hw_shape):\n        B, L, C = query.shape\n        H, W = hw_shape\n        assert L == H * W, 'input feature has wrong size'\n        query = query.view(B, H, W, C)\n\n        # pad feature maps to multiples of window size\n        pad_r = (self.window_size - W % self.window_size) % self.window_size\n        pad_b = (self.window_size - H % self.window_size) % self.window_size\n        query = F.pad(query, (0, 0, 0, pad_r, 0, pad_b))\n        H_pad, W_pad = query.shape[1], query.shape[2]\n\n        # cyclic shift\n        if self.shift_size > 0:\n            shifted_query = torch.roll(\n                query,\n                shifts=(-self.shift_size, -self.shift_size),\n                dims=(1, 2))\n\n            # calculate attention mask for SW-MSA\n            img_mask = torch.zeros((1, H_pad, W_pad, 1), device=query.device)\n            h_slices = (slice(0, -self.window_size),\n                        slice(-self.window_size,\n                              -self.shift_size), slice(-self.shift_size, None))\n            w_slices = (slice(0, -self.window_size),\n                        slice(-self.window_size,\n                              -self.shift_size), slice(-self.shift_size, None))\n            cnt = 0\n            for h in h_slices:\n                for w in w_slices:\n                    img_mask[:, h, w, :] = cnt\n                    cnt += 1\n\n            # nW, window_size, window_size, 1\n            mask_windows = self.window_partition(img_mask)\n            mask_windows = mask_windows.view(\n                -1, self.window_size * self.window_size)\n            attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n            attn_mask = attn_mask.masked_fill(attn_mask != 0,\n                                              float(-100.0)).masked_fill(\n                                                  attn_mask == 0, float(0.0))\n        else:\n            shifted_query = query\n            attn_mask = None\n\n        # nW*B, window_size, window_size, C\n        query_windows = self.window_partition(shifted_query)\n        # nW*B, 
window_size*window_size, C\n        query_windows = query_windows.view(-1, self.window_size**2, C)\n\n        # W-MSA/SW-MSA (nW*B, window_size*window_size, C)\n        attn_windows = self.w_msa(query_windows, mask=attn_mask)\n\n        # merge windows\n        attn_windows = attn_windows.view(-1, self.window_size,\n                                         self.window_size, C)\n\n        # B H' W' C\n        shifted_x = self.window_reverse(attn_windows, H_pad, W_pad)\n        # reverse cyclic shift\n        if self.shift_size > 0:\n            x = torch.roll(\n                shifted_x,\n                shifts=(self.shift_size, self.shift_size),\n                dims=(1, 2))\n        else:\n            x = shifted_x\n\n        if pad_r > 0 or pad_b:\n            x = x[:, :H, :W, :].contiguous()\n\n        x = x.view(B, H * W, C)\n\n        x = self.drop(x)\n        return x\n\n    def window_reverse(self, windows, H, W):\n        \"\"\"\n        Args:\n            windows: (num_windows*B, window_size, window_size, C)\n            H (int): Height of image\n            W (int): Width of image\n        Returns:\n            x: (B, H, W, C)\n        \"\"\"\n        window_size = self.window_size\n        B = int(windows.shape[0] / (H * W / window_size / window_size))\n        x = windows.view(B, H // window_size, W // window_size, window_size,\n                         window_size, -1)\n        x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n        return x\n\n    def window_partition(self, x):\n        \"\"\"\n        Args:\n            x: (B, H, W, C)\n        Returns:\n            windows: (num_windows*B, window_size, window_size, C)\n        \"\"\"\n        B, H, W, C = x.shape\n        window_size = self.window_size\n        x = x.view(B, H // window_size, window_size, W // window_size,\n                   window_size, C)\n        windows = x.permute(0, 1, 3, 2, 4, 5).contiguous()\n        windows = windows.view(-1, window_size, window_size, C)\n        return windows\n\n\nclass SwinBlock(BaseModule):\n    \"\"\"\"\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        window_size (int, optional): The local window scale. Default: 7.\n        shift (bool, optional): whether to shift window or not. Default False.\n        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        drop_rate (float, optional): Dropout rate. Default: 0.\n        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n        drop_path_rate (float, optional): Stochastic depth rate. Default: 0.\n        act_cfg (dict, optional): The config dict of activation function.\n            Default: dict(type='GELU').\n        norm_cfg (dict, optional): The config dict of normalization.\n            Default: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. 
Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        init_cfg (dict | list | None, optional): The init config.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 window_size=7,\n                 shift=False,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 init_cfg=None):\n\n        super(SwinBlock, self).__init__()\n\n        self.init_cfg = init_cfg\n        self.with_cp = with_cp\n\n        self.norm1 = build_norm_layer(norm_cfg, embed_dims)[1]\n        self.attn = ShiftWindowMSA(\n            embed_dims=embed_dims,\n            num_heads=num_heads,\n            window_size=window_size,\n            shift_size=window_size // 2 if shift else 0,\n            qkv_bias=qkv_bias,\n            qk_scale=qk_scale,\n            attn_drop_rate=attn_drop_rate,\n            proj_drop_rate=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            init_cfg=None)\n\n        self.norm2 = build_norm_layer(norm_cfg, embed_dims)[1]\n        self.ffn = FFN(\n            embed_dims=embed_dims,\n            feedforward_channels=feedforward_channels,\n            num_fcs=2,\n            ffn_drop=drop_rate,\n            dropout_layer=dict(type='DropPath', drop_prob=drop_path_rate),\n            act_cfg=act_cfg,\n            add_identity=True,\n            init_cfg=None)\n\n    def forward(self, x, hw_shape):\n\n        def _inner_forward(x):\n            identity = x\n            x = self.norm1(x)\n            x = self.attn(x, hw_shape)\n\n            x = x + identity\n\n            identity = x\n            x = self.norm2(x)\n            x = self.ffn(x, identity=identity)\n\n            return x\n\n        if self.with_cp and x.requires_grad:\n            x = cp.checkpoint(_inner_forward, x)\n        else:\n            x = _inner_forward(x)\n\n        return x\n\n\nclass SwinBlockSequence(BaseModule):\n    \"\"\"Implements one stage in Swin Transformer.\n\n    Args:\n        embed_dims (int): The feature dimension.\n        num_heads (int): Parallel attention heads.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        depth (int): The number of blocks in this stage.\n        window_size (int, optional): The local window scale. Default: 7.\n        qkv_bias (bool, optional): enable bias for qkv if True. Default: True.\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        drop_rate (float, optional): Dropout rate. Default: 0.\n        attn_drop_rate (float, optional): Attention dropout rate. Default: 0.\n        drop_path_rate (float | list[float], optional): Stochastic depth\n            rate. Default: 0.\n        downsample (BaseModule | None, optional): The downsample operation\n            module. Default: None.\n        act_cfg (dict, optional): The config dict of activation function.\n            Default: dict(type='GELU').\n        norm_cfg (dict, optional): The config dict of normalization.\n            Default: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. 
Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        init_cfg (dict | list | None, optional): The init config.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 embed_dims,\n                 num_heads,\n                 feedforward_channels,\n                 depth,\n                 window_size=7,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.,\n                 downsample=None,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n\n        if isinstance(drop_path_rate, list):\n            drop_path_rates = drop_path_rate\n            assert len(drop_path_rates) == depth\n        else:\n            drop_path_rates = [deepcopy(drop_path_rate) for _ in range(depth)]\n\n        self.blocks = ModuleList()\n        for i in range(depth):\n            block = SwinBlock(\n                embed_dims=embed_dims,\n                num_heads=num_heads,\n                feedforward_channels=feedforward_channels,\n                window_size=window_size,\n                shift=False if i % 2 == 0 else True,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop_rate=drop_rate,\n                attn_drop_rate=attn_drop_rate,\n                drop_path_rate=drop_path_rates[i],\n                act_cfg=act_cfg,\n                norm_cfg=norm_cfg,\n                with_cp=with_cp,\n                init_cfg=None)\n            self.blocks.append(block)\n\n        self.downsample = downsample\n\n    def forward(self, x, hw_shape):\n        for block in self.blocks:\n            x = block(x, hw_shape)\n\n        if self.downsample:\n            x_down, down_hw_shape = self.downsample(x, hw_shape)\n            return x_down, down_hw_shape, x, hw_shape\n        else:\n            return x, hw_shape, x, hw_shape\n\n\n@BACKBONES.register_module()\nclass SwinTransformer(BaseModule):\n    \"\"\" Swin Transformer\n    A PyTorch implement of : `Swin Transformer:\n    Hierarchical Vision Transformer using Shifted Windows`  -\n        https://arxiv.org/abs/2103.14030\n\n    Inspiration from\n    https://github.com/microsoft/Swin-Transformer\n\n    Args:\n        pretrain_img_size (int | tuple[int]): The size of input image when\n            pretrain. Defaults: 224.\n        in_channels (int): The num of input channels.\n            Defaults: 3.\n        embed_dims (int): The feature dimension. Default: 96.\n        patch_size (int | tuple[int]): Patch size. Default: 4.\n        window_size (int): Window size. Default: 7.\n        mlp_ratio (int | float): Ratio of mlp hidden dim to embedding dim.\n            Default: 4.\n        depths (tuple[int]): Depths of each Swin Transformer stage.\n            Default: (2, 2, 6, 2).\n        num_heads (tuple[int]): Parallel attention heads of each Swin\n            Transformer stage. Default: (3, 6, 12, 24).\n        strides (tuple[int]): The patch merging or patch embedding stride of\n            each Swin Transformer stage. (In swin, we set kernel size equal to\n            stride.) 
Default: (4, 2, 2, 2).\n        out_indices (tuple[int]): Output from which stages.\n            Default: (0, 1, 2, 3).\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key,\n            value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of\n            head_dim ** -0.5 if set. Default: None.\n        patch_norm (bool): If add a norm layer for patch embed and patch\n            merging. Default: True.\n        drop_rate (float): Dropout rate. Defaults: 0.\n        attn_drop_rate (float): Attention dropout rate. Default: 0.\n        drop_path_rate (float): Stochastic depth rate. Defaults: 0.1.\n        use_abs_pos_embed (bool): If True, add absolute position embedding to\n            the patch embedding. Defaults: False.\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='GELU').\n        norm_cfg (dict): Config dict for normalization layer at\n            output of backone. Defaults: dict(type='LN').\n        with_cp (bool, optional): Use checkpoint or not. Using checkpoint\n            will save some memory while slowing down the training speed.\n            Default: False.\n        pretrained (str, optional): model pretrained path. Default: None.\n        convert_weights (bool): The flag indicates whether the\n            pre-trained model is from the original repo. We may need\n            to convert some keys to make it compatible.\n            Default: False.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            Default: -1 (-1 means not freezing any parameters).\n        init_cfg (dict, optional): The Config for initialization.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=224,\n                 in_channels=3,\n                 embed_dims=96,\n                 patch_size=4,\n                 window_size=7,\n                 mlp_ratio=4,\n                 depths=(2, 2, 6, 2),\n                 num_heads=(3, 6, 12, 24),\n                 strides=(4, 2, 2, 2),\n                 out_indices=(0, 1, 2, 3),\n                 qkv_bias=True,\n                 qk_scale=None,\n                 patch_norm=True,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.1,\n                 use_abs_pos_embed=False,\n                 act_cfg=dict(type='GELU'),\n                 norm_cfg=dict(type='LN'),\n                 with_cp=False,\n                 pretrained=None,\n                 convert_weights=False,\n                 frozen_stages=-1,\n                 init_cfg=None):\n        self.convert_weights = convert_weights\n        self.frozen_stages = frozen_stages\n        if isinstance(pretrain_img_size, int):\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n        elif isinstance(pretrain_img_size, tuple):\n            if len(pretrain_img_size) == 1:\n                pretrain_img_size = to_2tuple(pretrain_img_size[0])\n            assert len(pretrain_img_size) == 2, \\\n                f'The size of image should have length 1 or 2, ' \\\n                f'but got {len(pretrain_img_size)}'\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = 
dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            self.init_cfg = init_cfg\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n        super(SwinTransformer, self).__init__(init_cfg=init_cfg)\n\n        num_layers = len(depths)\n        self.out_indices = out_indices\n        self.use_abs_pos_embed = use_abs_pos_embed\n\n        assert strides[0] == patch_size, 'Use non-overlapping patch embed.'\n\n        self.patch_embed = PatchEmbed(\n            in_channels=in_channels,\n            embed_dims=embed_dims,\n            conv_type='Conv2d',\n            kernel_size=patch_size,\n            stride=strides[0],\n            norm_cfg=norm_cfg if patch_norm else None,\n            init_cfg=None)\n\n        if self.use_abs_pos_embed:\n            patch_row = pretrain_img_size[0] // patch_size\n            patch_col = pretrain_img_size[1] // patch_size\n            self.absolute_pos_embed = nn.Parameter(\n                torch.zeros((1, embed_dims, patch_row, patch_col)))\n\n        self.drop_after_pos = nn.Dropout(p=drop_rate)\n\n        # set stochastic depth decay rule\n        total_depth = sum(depths)\n        dpr = [\n            x.item() for x in torch.linspace(0, drop_path_rate, total_depth)\n        ]\n\n        self.stages = ModuleList()\n        in_channels = embed_dims\n        for i in range(num_layers):\n            if i < num_layers - 1:\n                downsample = PatchMerging(\n                    in_channels=in_channels,\n                    out_channels=2 * in_channels,\n                    stride=strides[i + 1],\n                    norm_cfg=norm_cfg if patch_norm else None,\n                    init_cfg=None)\n            else:\n                downsample = None\n\n            stage = SwinBlockSequence(\n                embed_dims=in_channels,\n                num_heads=num_heads[i],\n                feedforward_channels=int(mlp_ratio * in_channels),\n                depth=depths[i],\n                window_size=window_size,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop_rate=drop_rate,\n                attn_drop_rate=attn_drop_rate,\n                drop_path_rate=dpr[sum(depths[:i]):sum(depths[:i + 1])],\n                downsample=downsample,\n                act_cfg=act_cfg,\n                norm_cfg=norm_cfg,\n                with_cp=with_cp,\n                init_cfg=None)\n            self.stages.append(stage)\n            if downsample:\n                in_channels = downsample.out_channels\n\n        self.num_features = [int(embed_dims * 2**i) for i in range(num_layers)]\n        # Add a norm layer for each output\n        for i in out_indices:\n            layer = build_norm_layer(norm_cfg, self.num_features[i])[1]\n            layer_name = f'norm{i}'\n            self.add_module(layer_name, layer)\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n        super(SwinTransformer, self).train(mode)\n        self._freeze_stages()\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            self.patch_embed.eval()\n            for param in self.patch_embed.parameters():\n                param.requires_grad = False\n            if self.use_abs_pos_embed:\n                self.absolute_pos_embed.requires_grad = False\n            self.drop_after_pos.eval()\n\n        for i in range(1, self.frozen_stages + 1):\n\n            if (i - 1) in self.out_indices:\n   
             norm_layer = getattr(self, f'norm{i-1}')\n                norm_layer.eval()\n                for param in norm_layer.parameters():\n                    param.requires_grad = False\n\n            m = self.stages[i - 1]\n            m.eval()\n            for param in m.parameters():\n                param.requires_grad = False\n\n    def init_weights(self):\n        logger = get_root_logger()\n        if self.init_cfg is None:\n            logger.warn(f'No pre-trained weights for '\n                        f'{self.__class__.__name__}, '\n                        f'training start from scratch')\n            if self.use_abs_pos_embed:\n                trunc_normal_(self.absolute_pos_embed, std=0.02)\n            for m in self.modules():\n                if isinstance(m, nn.Linear):\n                    trunc_normal_init(m, std=.02, bias=0.)\n                elif isinstance(m, nn.LayerNorm):\n                    constant_init(m, 1.0)\n        else:\n            assert 'checkpoint' in self.init_cfg, f'Only support ' \\\n                                                  f'specify `Pretrained` in ' \\\n                                                  f'`init_cfg` in ' \\\n                                                  f'{self.__class__.__name__} '\n            ckpt = _load_checkpoint(\n                self.init_cfg.checkpoint, logger=logger, map_location='cpu')\n            if 'state_dict' in ckpt:\n                _state_dict = ckpt['state_dict']\n            elif 'model' in ckpt:\n                _state_dict = ckpt['model']\n            else:\n                _state_dict = ckpt\n            if self.convert_weights:\n                # supported loading weight from original repo,\n                _state_dict = swin_converter(_state_dict)\n\n            state_dict = OrderedDict()\n            for k, v in _state_dict.items():\n                if k.startswith('backbone.'):\n                    state_dict[k[9:]] = v\n\n            # strip prefix of state_dict\n            if list(state_dict.keys())[0].startswith('module.'):\n                state_dict = {k[7:]: v for k, v in state_dict.items()}\n\n            # reshape absolute position embedding\n            if state_dict.get('absolute_pos_embed') is not None:\n                absolute_pos_embed = state_dict['absolute_pos_embed']\n                N1, L, C1 = absolute_pos_embed.size()\n                N2, C2, H, W = self.absolute_pos_embed.size()\n                if N1 != N2 or C1 != C2 or L != H * W:\n                    logger.warning('Error in loading absolute_pos_embed, pass')\n                else:\n                    state_dict['absolute_pos_embed'] = absolute_pos_embed.view(\n                        N2, H, W, C2).permute(0, 3, 1, 2).contiguous()\n\n            # interpolate position bias table if needed\n            relative_position_bias_table_keys = [\n                k for k in state_dict.keys()\n                if 'relative_position_bias_table' in k\n            ]\n            for table_key in relative_position_bias_table_keys:\n                table_pretrained = state_dict[table_key]\n                table_current = self.state_dict()[table_key]\n                L1, nH1 = table_pretrained.size()\n                L2, nH2 = table_current.size()\n                if nH1 != nH2:\n                    logger.warning(f'Error in loading {table_key}, pass')\n                elif L1 != L2:\n                    S1 = int(L1**0.5)\n                    S2 = int(L2**0.5)\n                    table_pretrained_resized = F.interpolate(\n      
                  table_pretrained.permute(1, 0).reshape(1, nH1, S1, S1),\n                        size=(S2, S2),\n                        mode='bicubic')\n                    state_dict[table_key] = table_pretrained_resized.view(\n                        nH2, L2).permute(1, 0).contiguous()\n\n            # load state_dict\n            self.load_state_dict(state_dict, False)\n\n    def forward(self, x):\n        x, hw_shape = self.patch_embed(x)\n\n        if self.use_abs_pos_embed:\n            h, w = self.absolute_pos_embed.shape[1:3]\n            if hw_shape[0] != h or hw_shape[1] != w:\n                absolute_pos_embed = F.interpolate(\n                    self.absolute_pos_embed,\n                    size=hw_shape,\n                    mode='bicubic',\n                    align_corners=False).flatten(2).transpose(1, 2)\n            else:\n                absolute_pos_embed = self.absolute_pos_embed.flatten(\n                    2).transpose(1, 2)\n            x = x + absolute_pos_embed\n        x = self.drop_after_pos(x)\n\n        outs = []\n        for i, stage in enumerate(self.stages):\n            x, hw_shape, out, out_hw_shape = stage(x, hw_shape)\n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                out = norm_layer(out)\n                out = out.view(-1, *out_hw_shape,\n                               self.num_features[i]).permute(0, 3, 1,\n                                                             2).contiguous()\n                outs.append(out)\n\n        return outs\n"
  },
  {
    "path": "mmdet/models/backbones/trident_resnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import BaseModule\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.backbones.resnet import Bottleneck, ResNet\nfrom mmdet.models.builder import BACKBONES\n\n\nclass TridentConv(BaseModule):\n    \"\"\"Trident Convolution Module.\n\n    Args:\n        in_channels (int): Number of channels in input.\n        out_channels (int): Number of channels in output.\n        kernel_size (int): Size of convolution kernel.\n        stride (int, optional): Convolution stride. Default: 1.\n        trident_dilations (tuple[int, int, int], optional): Dilations of\n            different trident branch. Default: (1, 2, 3).\n        test_branch_idx (int, optional): In inference, all 3 branches will\n            be used if `test_branch_idx==-1`, otherwise only branch with\n            index `test_branch_idx` will be used. Default: 1.\n        bias (bool, optional): Whether to use bias in convolution or not.\n            Default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size,\n                 stride=1,\n                 trident_dilations=(1, 2, 3),\n                 test_branch_idx=1,\n                 bias=False,\n                 init_cfg=None):\n        super(TridentConv, self).__init__(init_cfg)\n        self.num_branch = len(trident_dilations)\n        self.with_bias = bias\n        self.test_branch_idx = test_branch_idx\n        self.stride = _pair(stride)\n        self.kernel_size = _pair(kernel_size)\n        self.paddings = _pair(trident_dilations)\n        self.dilations = trident_dilations\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.bias = bias\n\n        self.weight = nn.Parameter(\n            torch.Tensor(out_channels, in_channels, *self.kernel_size))\n        if bias:\n            self.bias = nn.Parameter(torch.Tensor(out_channels))\n        else:\n            self.bias = None\n\n    def extra_repr(self):\n        tmpstr = f'in_channels={self.in_channels}'\n        tmpstr += f', out_channels={self.out_channels}'\n        tmpstr += f', kernel_size={self.kernel_size}'\n        tmpstr += f', num_branch={self.num_branch}'\n        tmpstr += f', test_branch_idx={self.test_branch_idx}'\n        tmpstr += f', stride={self.stride}'\n        tmpstr += f', paddings={self.paddings}'\n        tmpstr += f', dilations={self.dilations}'\n        tmpstr += f', bias={self.bias}'\n        return tmpstr\n\n    def forward(self, inputs):\n        if self.training or self.test_branch_idx == -1:\n            outputs = [\n                F.conv2d(input, self.weight, self.bias, self.stride, padding,\n                         dilation) for input, dilation, padding in zip(\n                             inputs, self.dilations, self.paddings)\n            ]\n        else:\n            assert len(inputs) == 1\n            outputs = [\n                F.conv2d(inputs[0], self.weight, self.bias, self.stride,\n                         self.paddings[self.test_branch_idx],\n                         self.dilations[self.test_branch_idx])\n            ]\n\n        return outputs\n\n\n# Since TridentNet is defined over ResNet50 and 
ResNet101, here we\n# only support TridentBottleneckBlock.\nclass TridentBottleneck(Bottleneck):\n    \"\"\"BottleBlock for TridentResNet.\n\n    Args:\n        trident_dilations (tuple[int, int, int]): Dilations of different\n            trident branch.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n        concat_output (bool): Whether to concat the output list to a Tensor.\n            `True` only in the last Block.\n    \"\"\"\n\n    def __init__(self, trident_dilations, test_branch_idx, concat_output,\n                 **kwargs):\n\n        super(TridentBottleneck, self).__init__(**kwargs)\n        self.trident_dilations = trident_dilations\n        self.num_branch = len(trident_dilations)\n        self.concat_output = concat_output\n        self.test_branch_idx = test_branch_idx\n        self.conv2 = TridentConv(\n            self.planes,\n            self.planes,\n            kernel_size=3,\n            stride=self.conv2_stride,\n            bias=False,\n            trident_dilations=self.trident_dilations,\n            test_branch_idx=test_branch_idx,\n            init_cfg=dict(\n                type='Kaiming',\n                distribution='uniform',\n                mode='fan_in',\n                override=dict(name='conv2')))\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            num_branch = (\n                self.num_branch\n                if self.training or self.test_branch_idx == -1 else 1)\n            identity = x\n            if not isinstance(x, list):\n                x = (x, ) * num_branch\n                identity = x\n                if self.downsample is not None:\n                    identity = [self.downsample(b) for b in x]\n\n            out = [self.conv1(b) for b in x]\n            out = [self.norm1(b) for b in out]\n            out = [self.relu(b) for b in out]\n\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv1_plugin_names)\n\n            out = self.conv2(out)\n            out = [self.norm2(b) for b in out]\n            out = [self.relu(b) for b in out]\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv2_plugin_names)\n\n            out = [self.conv3(b) for b in out]\n            out = [self.norm3(b) for b in out]\n\n            if self.with_plugins:\n                for k in range(len(out)):\n                    out[k] = self.forward_plugin(out[k],\n                                                 self.after_conv3_plugin_names)\n\n            out = [\n                out_b + identity_b for out_b, identity_b in zip(out, identity)\n            ]\n            return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        out = [self.relu(b) for b in out]\n        if self.concat_output:\n            out = torch.cat(out, dim=0)\n        return out\n\n\ndef make_trident_res_layer(block,\n                           inplanes,\n                           planes,\n                           num_blocks,\n                           stride=1,\n                           trident_dilations=(1, 2, 
3),\n                           style='pytorch',\n                           with_cp=False,\n                           conv_cfg=None,\n                           norm_cfg=dict(type='BN'),\n                           dcn=None,\n                           plugins=None,\n                           test_branch_idx=-1):\n    \"\"\"Build Trident Res Layers.\"\"\"\n\n    downsample = None\n    if stride != 1 or inplanes != planes * block.expansion:\n        downsample = []\n        conv_stride = stride\n        downsample.extend([\n            build_conv_layer(\n                conv_cfg,\n                inplanes,\n                planes * block.expansion,\n                kernel_size=1,\n                stride=conv_stride,\n                bias=False),\n            build_norm_layer(norm_cfg, planes * block.expansion)[1]\n        ])\n        downsample = nn.Sequential(*downsample)\n\n    layers = []\n    for i in range(num_blocks):\n        layers.append(\n            block(\n                inplanes=inplanes,\n                planes=planes,\n                stride=stride if i == 0 else 1,\n                trident_dilations=trident_dilations,\n                downsample=downsample if i == 0 else None,\n                style=style,\n                with_cp=with_cp,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                dcn=dcn,\n                plugins=plugins,\n                test_branch_idx=test_branch_idx,\n                concat_output=True if i == num_blocks - 1 else False))\n        inplanes = planes * block.expansion\n    return nn.Sequential(*layers)\n\n\n@BACKBONES.register_module()\nclass TridentResNet(ResNet):\n    \"\"\"The stem layer, stage 1 and stage 2 in Trident ResNet are identical to\n    ResNet, while in stage 3, Trident BottleBlock is utilized to replace the\n    normal BottleBlock to yield trident output. 
Different branch shares the\n    convolution weight but uses different dilations to achieve multi-scale\n    output.\n\n                               / stage3(b0) \\\n    x - stem - stage1 - stage2 - stage3(b1) - output\n                               \\ stage3(b2) /\n\n    Args:\n        depth (int): Depth of resnet, from {50, 101, 152}.\n        num_branch (int): Number of branches in TridentNet.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n        trident_dilations (tuple[int]): Dilations of different trident branch.\n            len(trident_dilations) should be equal to num_branch.\n    \"\"\"  # noqa\n\n    def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,\n                 **kwargs):\n\n        assert num_branch == len(trident_dilations)\n        assert depth in (50, 101, 152)\n        super(TridentResNet, self).__init__(depth, **kwargs)\n        assert self.num_stages == 3\n        self.test_branch_idx = test_branch_idx\n        self.num_branch = num_branch\n\n        last_stage_idx = self.num_stages - 1\n        stride = self.strides[last_stage_idx]\n        dilation = trident_dilations\n        dcn = self.dcn if self.stage_with_dcn[last_stage_idx] else None\n        if self.plugins is not None:\n            stage_plugins = self.make_stage_plugins(self.plugins,\n                                                    last_stage_idx)\n        else:\n            stage_plugins = None\n        planes = self.base_channels * 2**last_stage_idx\n        res_layer = make_trident_res_layer(\n            TridentBottleneck,\n            inplanes=(self.block.expansion * self.base_channels *\n                      2**(last_stage_idx - 1)),\n            planes=planes,\n            num_blocks=self.stage_blocks[last_stage_idx],\n            stride=stride,\n            trident_dilations=dilation,\n            style=self.style,\n            with_cp=self.with_cp,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            dcn=dcn,\n            plugins=stage_plugins,\n            test_branch_idx=self.test_branch_idx)\n\n        layer_name = f'layer{last_stage_idx + 1}'\n\n        self.__setattr__(layer_name, res_layer)\n        self.res_layers.pop(last_stage_idx)\n        self.res_layers.insert(last_stage_idx, layer_name)\n\n        self._freeze_stages()\n"
  },
  {
    "path": "mmdet/models/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nfrom mmcv.cnn import MODELS as MMCV_MODELS\nfrom mmcv.utils import Registry\n\nMODELS = Registry('models', parent=MMCV_MODELS)\n\nBACKBONES = MODELS\nNECKS = MODELS\nROI_EXTRACTORS = MODELS\nSHARED_HEADS = MODELS\nHEADS = MODELS\nLOSSES = MODELS\nDETECTORS = MODELS\n\n\ndef build_backbone(cfg):\n    \"\"\"Build backbone.\"\"\"\n    return BACKBONES.build(cfg)\n\n\ndef build_neck(cfg):\n    \"\"\"Build neck.\"\"\"\n    return NECKS.build(cfg)\n\n\ndef build_roi_extractor(cfg):\n    \"\"\"Build roi extractor.\"\"\"\n    return ROI_EXTRACTORS.build(cfg)\n\n\ndef build_shared_head(cfg):\n    \"\"\"Build shared head.\"\"\"\n    return SHARED_HEADS.build(cfg)\n\n\ndef build_head(cfg):\n    \"\"\"Build head.\"\"\"\n    return HEADS.build(cfg)\n\n\ndef build_loss(cfg):\n    \"\"\"Build loss.\"\"\"\n    return LOSSES.build(cfg)\n\n\ndef build_detector(cfg, train_cfg=None, test_cfg=None):\n    \"\"\"Build detector.\"\"\"\n    if train_cfg is not None or test_cfg is not None:\n        warnings.warn(\n            'train_cfg and test_cfg is deprecated, '\n            'please specify them in model', UserWarning)\n    assert cfg.get('train_cfg') is None or train_cfg is None, \\\n        'train_cfg specified in both outer field and model field '\n    assert cfg.get('test_cfg') is None or test_cfg is None, \\\n        'test_cfg specified in both outer field and model field '\n    return DETECTORS.build(\n        cfg, default_args=dict(train_cfg=train_cfg, test_cfg=test_cfg))\n"
  },
  {
    "path": "mmdet/models/dense_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_free_head import AnchorFreeHead\nfrom .anchor_head import AnchorHead\nfrom .ascend_anchor_head import AscendAnchorHead\nfrom .ascend_retina_head import AscendRetinaHead\nfrom .ascend_ssd_head import AscendSSDHead\nfrom .atss_head import ATSSHead\nfrom .autoassign_head import AutoAssignHead\nfrom .cascade_rpn_head import CascadeRPNHead, StageCascadeRPNHead\nfrom .centernet_head import CenterNetHead\nfrom .centripetal_head import CentripetalHead\nfrom .corner_head import CornerHead\nfrom .ddod_head import DDODHead\nfrom .deformable_detr_head import DeformableDETRHead\nfrom .detr_head import DETRHead\nfrom .embedding_rpn_head import EmbeddingRPNHead\nfrom .fcos_head import FCOSHead\nfrom .fovea_head import FoveaHead\nfrom .free_anchor_retina_head import FreeAnchorRetinaHead\nfrom .fsaf_head import FSAFHead\nfrom .ga_retina_head import GARetinaHead\nfrom .ga_rpn_head import GARPNHead\nfrom .gfl_head import GFLHead\nfrom .guided_anchor_head import FeatureAdaption, GuidedAnchorHead\nfrom .lad_head import LADHead\nfrom .ld_head import LDHead\nfrom .mask2former_head import Mask2FormerHead\nfrom .maskformer_head import MaskFormerHead\nfrom .nasfcos_head import NASFCOSHead\nfrom .paa_head import PAAHead\nfrom .pisa_retinanet_head import PISARetinaHead\nfrom .pisa_ssd_head import PISASSDHead\nfrom .reppoints_head import RepPointsHead\nfrom .retina_head import RetinaHead\nfrom .retina_sepbn_head import RetinaSepBNHead\nfrom .rpn_head import RPNHead\nfrom .sabl_retina_head import SABLRetinaHead\nfrom .solo_head import DecoupledSOLOHead, DecoupledSOLOLightHead, SOLOHead\nfrom .solov2_head import SOLOV2Head\nfrom .ssd_head import SSDHead\nfrom .tood_head import TOODHead\nfrom .vfnet_head import VFNetHead\nfrom .yolact_head import YOLACTHead, YOLACTProtonet, YOLACTSegmHead\nfrom .yolo_head import YOLOV3Head\nfrom .yolof_head import YOLOFHead\nfrom .yolox_head import YOLOXHead\n\n__all__ = [\n    'AnchorFreeHead', 'AnchorHead', 'GuidedAnchorHead', 'FeatureAdaption',\n    'RPNHead', 'GARPNHead', 'RetinaHead', 'RetinaSepBNHead', 'GARetinaHead',\n    'SSDHead', 'FCOSHead', 'RepPointsHead', 'FoveaHead',\n    'FreeAnchorRetinaHead', 'ATSSHead', 'FSAFHead', 'NASFCOSHead',\n    'PISARetinaHead', 'PISASSDHead', 'GFLHead', 'CornerHead', 'YOLACTHead',\n    'YOLACTSegmHead', 'YOLACTProtonet', 'YOLOV3Head', 'PAAHead',\n    'SABLRetinaHead', 'CentripetalHead', 'VFNetHead', 'StageCascadeRPNHead',\n    'CascadeRPNHead', 'EmbeddingRPNHead', 'LDHead', 'AutoAssignHead',\n    'DETRHead', 'YOLOFHead', 'DeformableDETRHead', 'SOLOHead',\n    'DecoupledSOLOHead', 'CenterNetHead', 'YOLOXHead',\n    'DecoupledSOLOLightHead', 'LADHead', 'TOODHead', 'MaskFormerHead',\n    'Mask2FormerHead', 'SOLOV2Head', 'DDODHead', 'AscendAnchorHead',\n    'AscendRetinaHead', 'AscendSSDHead'\n]\n"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_free_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom abc import abstractmethod\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import build_bbox_coder, multi_apply\nfrom mmdet.core.anchor.point_generator import MlvlPointGenerator\nfrom ..builder import HEADS, build_loss\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\n@HEADS.register_module()\nclass AnchorFreeHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"Anchor-free head (FCOS, Fovea, RepPoints, etc.).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n        stacked_convs (int): Number of stacking convs of the head.\n        strides (tuple): Downsample factor of each feature map.\n        dcn_on_last_conv (bool): If true, use dcn in the last layer of\n            towers. Default: False.\n        conv_bias (bool | str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Default: \"auto\".\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        bbox_coder (dict): Config of bbox coder. Defaults\n            'DistancePointBBoxCoder'.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    _version = 1\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 feat_channels=256,\n                 stacked_convs=4,\n                 strides=(4, 8, 16, 32, 64),\n                 dcn_on_last_conv=False,\n                 conv_bias='auto',\n                 loss_cls=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n                 bbox_coder=dict(type='DistancePointBBoxCoder'),\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_cls',\n                         std=0.01,\n                         bias_prob=0.01))):\n        super(AnchorFreeHead, self).__init__(init_cfg)\n        self.num_classes = num_classes\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.dcn_on_last_conv 
= dcn_on_last_conv\n        assert conv_bias == 'auto' or isinstance(conv_bias, bool)\n        self.conv_bias = conv_bias\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n\n        self.prior_generator = MlvlPointGenerator(strides)\n\n        # In order to keep a more general interface and be consistent with\n        # anchor_head. We can think of point like one anchor\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.fp16_enabled = False\n\n        self._init_layers()\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self._init_cls_convs()\n        self._init_reg_convs()\n        self._init_predictor()\n\n    def _init_cls_convs(self):\n        \"\"\"Initialize classification conv layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.conv_bias))\n\n    def _init_reg_convs(self):\n        \"\"\"Initialize bbox regression conv layers of the head.\"\"\"\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.conv_bias))\n\n    def _init_predictor(self):\n        \"\"\"Initialize predictor layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        \"\"\"Hack some keys of the model state dict so that can load checkpoints\n        of previous version.\"\"\"\n        version = local_metadata.get('version', None)\n        if version is None:\n            # the key is different in early versions\n            # for example, 'fcos_cls' become 'conv_cls' now\n            bbox_head_keys = [\n                k for k in state_dict.keys() if k.startswith(prefix)\n            ]\n            ori_predictor_keys = []\n            new_predictor_keys = []\n            # e.g. 
'fcos_cls' or 'fcos_reg'\n            for key in bbox_head_keys:\n                ori_predictor_keys.append(key)\n                key = key.split('.')\n                conv_name = None\n                if key[1].endswith('cls'):\n                    conv_name = 'conv_cls'\n                elif key[1].endswith('reg'):\n                    conv_name = 'conv_reg'\n                elif key[1].endswith('centerness'):\n                    conv_name = 'conv_centerness'\n                else:\n                    assert NotImplementedError\n                if conv_name is not None:\n                    key[1] = conv_name\n                    new_predictor_keys.append('.'.join(key))\n                else:\n                    ori_predictor_keys.pop(-1)\n            for i in range(len(new_predictor_keys)):\n                state_dict[new_predictor_keys[i]] = state_dict.pop(\n                    ori_predictor_keys[i])\n        super()._load_from_state_dict(state_dict, prefix, local_metadata,\n                                      strict, missing_keys, unexpected_keys,\n                                      error_msgs)\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually contain classification scores and bbox predictions.\n                cls_scores (list[Tensor]): Box scores for each scale level,\n                    each is a 4D-tensor, the channel number is\n                    num_points * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                    level, each is a 4D-tensor, the channel number is\n                    num_points * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, feats)[:2]\n\n    def forward_single(self, x):\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n\n        Returns:\n            tuple: Scores for each class, bbox predictions, features\n                after classification and regression conv layers, some\n                models needs these features like FCOS.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n        cls_score = self.conv_cls(cls_feat)\n\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n        bbox_pred = self.conv_reg(reg_feat)\n        return cls_score, bbox_pred, cls_feat, reg_feat\n\n    @abstractmethod\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices 
corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n        \"\"\"\n\n        raise NotImplementedError\n\n    @abstractmethod\n    def get_targets(self, points, gt_bboxes_list, gt_labels_list):\n        \"\"\"Compute regression, classification and centerness targets for points\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n            gt_labels_list (list[Tensor]): Ground truth labels of each box,\n                each has shape (num_gt,).\n        \"\"\"\n        raise NotImplementedError\n\n    def _get_points_single(self,\n                           featmap_size,\n                           stride,\n                           dtype,\n                           device,\n                           flatten=False):\n        \"\"\"Get points of a single scale level.\n\n        This function will be deprecated soon.\n        \"\"\"\n\n        warnings.warn(\n            '`_get_points_single` in `AnchorFreeHead` will be '\n            'deprecated soon, we support a multi level point generator now, '\n            'you can get points of a single level feature map '\n            'with `self.prior_generator.single_level_grid_priors` ')\n\n        h, w = featmap_size\n        # First create the range with the default dtype, then convert to\n        # target `dtype` for ONNX exporting.\n        x_range = torch.arange(w, device=device).to(dtype)\n        y_range = torch.arange(h, device=device).to(dtype)\n        y, x = torch.meshgrid(y_range, x_range)\n        if flatten:\n            y = y.flatten()\n            x = x.flatten()\n        return y, x\n\n    def get_points(self, featmap_sizes, dtype, device, flatten=False):\n        \"\"\"Get points according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            dtype (torch.dtype): Type of points.\n            device (torch.device): Device of points.\n\n        Returns:\n            tuple: points of each image.\n        \"\"\"\n        warnings.warn(\n            '`get_points` in `AnchorFreeHead` will be '\n            'deprecated soon, we support a multi level point generator now, '\n            'you can get points of all levels '\n            'with `self.prior_generator.grid_priors` ')\n\n        mlvl_points = []\n        for i in range(len(featmap_sizes)):\n            mlvl_points.append(\n                self._get_points_single(featmap_sizes[i], self.strides[i],\n                                        dtype, device, flatten))\n        return mlvl_points\n\n    def aug_test(self, feats, img_metas, rescale=False):\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. 
Each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[ndarray]: bbox results of each class.\n        \"\"\"\n        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)\n"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder,\n                        build_prior_generator, build_sampler, images_to_levels,\n                        multi_apply, unmap)\nfrom ..builder import HEADS, build_loss\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\n@HEADS.register_module()\nclass AnchorHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"Anchor-based head (RPN, RetinaNet, SSD, etc.).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 feat_channels=256,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     scales=[8, 16, 32],\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[4, 8, 16, 32, 64]),\n                 bbox_coder=dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=(.0, .0, .0, .0),\n                     target_stds=(1.0, 1.0, 1.0, 1.0)),\n                 reg_decoded_bbox=False,\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_bbox=dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)):\n        super(AnchorHead, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n\n        if self.cls_out_channels <= 0:\n            raise ValueError(f'num_classes={num_classes} is too small')\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if 
self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            if hasattr(self.train_cfg,\n                       'sampler') and self.train_cfg.sampler.type.split(\n                           '.')[-1] != 'PseudoSampler':\n                self.sampling = True\n                sampler_cfg = self.train_cfg.sampler\n                # avoid BC-breaking\n                if loss_cls['type'] in [\n                        'FocalLoss', 'GHMC', 'QualityFocalLoss'\n                ]:\n                    warnings.warn(\n                        'DeprecationWarning: Determining whether to sample '\n                        'by loss type is deprecated, please delete sampler in '\n                        'your config when using `FocalLoss`, `GHMC`, '\n                        '`QualityFocalLoss` or other FocalLoss variant.')\n                    self.sampling = False\n                    sampler_cfg = dict(type='PseudoSampler')\n            else:\n                self.sampling = False\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.fp16_enabled = False\n\n        self.prior_generator = build_prior_generator(anchor_generator)\n\n        # Usually the numbers of anchors for each level are the same\n        # except SSD detectors. So it is an int in most dense\n        # heads but a list of int in SSDHead\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n        self._init_layers()\n\n    @property\n    def num_anchors(self):\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'for consistency, please use '\n                      '`num_base_priors` instead')\n        return self.prior_generator.num_base_priors[0]\n\n    @property\n    def anchor_generator(self):\n        warnings.warn('DeprecationWarning: anchor_generator is deprecated, '\n                      'please use \"prior_generator\" instead')\n        return self.prior_generator\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.conv_cls = nn.Conv2d(self.in_channels,\n                                  self.num_base_priors * self.cls_out_channels,\n                                  1)\n        self.conv_reg = nn.Conv2d(self.in_channels, self.num_base_priors * 4,\n                                  1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level, \\\n                    the channels number is num_base_priors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale \\\n                    level, the channels number is num_base_priors * 4.\n        \"\"\"\n        cls_score = self.conv_cls(x)\n        bbox_pred = self.conv_reg(x)\n        return cls_score, bbox_pred\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: A tuple of classification scores and bbox prediction.\n\n                - cls_scores (list[Tensor]): Classification scores for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                 
   is num_base_priors * num_classes.\n                - bbox_preds (list[Tensor]): Box energies / deltas for all \\\n                    scale levels, each is a 4D-tensor, the channels number \\\n                    is num_base_priors * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, feats)\n\n    def get_anchors(self, featmap_sizes, img_metas, device='cuda'):\n        \"\"\"Get anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            device (torch.device | str): Device for returned tensors\n\n        Returns:\n            tuple:\n                anchor_list (list[Tensor]): Anchors of each image.\n                valid_flag_list (list[Tensor]): Valid flags of each image.\n        \"\"\"\n        num_imgs = len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # anchors for one time\n        multi_level_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level anchors\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(img_metas):\n            multi_level_flags = self.prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'], device)\n            valid_flag_list.append(multi_level_flags)\n\n        return anchor_list, valid_flag_list\n\n    def _get_targets_single(self,\n                            flat_anchors,\n                            valid_flags,\n                            gt_bboxes,\n                            gt_bboxes_ignore,\n                            gt_labels,\n                            img_meta,\n                            label_channels=1,\n                            unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            img_meta (dict): Meta info of the image.\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple:\n                labels_list (list[Tensor]): Labels of each level\n                label_weights_list (list[Tensor]): Label weights of each level\n                bbox_targets_list (list[Tensor]): BBox targets of each level\n                bbox_weights_list (list[Tensor]): BBox weights of each level\n                num_total_pos (int): Number of positive samples in all images\n                num_total_neg (int): Number of negative samples in all images\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n         
                                  img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        assign_result = self.assigner.assign(\n            anchors, gt_bboxes, gt_bboxes_ignore,\n            None if self.sampling else gt_labels)\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class since v2.5.0\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds, sampling_result)\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True,\n                    return_sampling_results=False):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - num_total_pos (int): Number of positive samples in all\n                  images.\n                - num_total_neg (int): Number of negative samples in all\n                  images.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors to a single tensor\n        concat_anchor_list = []\n        concat_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        results = multi_apply(\n            self._get_targets_single,\n            concat_anchor_list,\n            concat_valid_flag_list,\n            gt_bboxes_list,\n            gt_bboxes_ignore_list,\n            gt_labels_list,\n            img_metas,\n            label_channels=label_channels,\n            unmap_outputs=unmap_outputs)\n        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,\n         pos_inds_list, neg_inds_list, sampling_results_list) = results[:7]\n        rest_results = list(results[7:])  # user-added return values\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in 
neg_inds_list])\n        # split targets to a list w.r.t. multiple levels\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        res = (labels_list, label_weights_list, bbox_targets_list,\n               bbox_weights_list, num_total_pos, num_total_neg)\n        if return_sampling_results:\n            res = res + (sampling_results_list, )\n        for i, r in enumerate(rest_results):  # user-added return values\n            rest_results[i] = images_to_levels(r, num_level_anchors)\n\n        return res + tuple(rest_results)\n\n    def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,\n                    bbox_targets, bbox_weights, num_total_samples):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            bbox_weights (Tensor): BBox regression loss weights of each anchor\n                with shape (N, num_total_anchors, 4).\n            num_total_samples (int): If sampling, num total samples equal to\n                the number of total anchors; Otherwise, it is the number of\n                positive anchors.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=num_total_samples)\n        # regression loss\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        bbox_weights = bbox_weights.reshape(-1, 4)\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            anchors = anchors.reshape(-1, 4)\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n        loss_bbox = self.loss_bbox(\n            bbox_pred,\n            bbox_targets,\n            bbox_weights,\n            avg_factor=num_total_samples)\n        return loss_cls, loss_bbox\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss. Default: None\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        num_total_samples = (\n            num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            num_total_samples=num_total_samples)\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n    def aug_test(self, feats, img_metas, rescale=False):\n      
  \"\"\"Test function with test time augmentation.\n\n        Args:\n            feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5), where\n                5 represent (tl_x, tl_y, br_x, br_y, score).\n                The shape of the second tensor in the tuple is ``labels``\n                with shape (n,), The length of list should always be 1.\n        \"\"\"\n        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)\n"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ...core.bbox.assigners import AscendMaxIoUAssigner\nfrom ...core.bbox.samplers import PseudoSampler\nfrom ...utils import (batch_images_to_levels, get_max_num_gt_division_factor,\n                      masked_fill)\nfrom ..builder import HEADS\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass AscendAnchorHead(AnchorHead):\n    \"\"\"Ascend Anchor-based head (RetinaNet, SSD, etc.).\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 feat_channels=256,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     scales=[8, 16, 32],\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[4, 8, 16, 32, 64]),\n                 bbox_coder=dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=(.0, .0, .0, .0),\n                     target_stds=(1.0, 1.0, 1.0, 1.0)),\n                 reg_decoded_bbox=False,\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_bbox=dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(type='Normal', layer='Conv2d', std=0.01)):\n        super(AscendAnchorHead, self).__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            anchor_generator=anchor_generator,\n            bbox_coder=bbox_coder,\n            reg_decoded_bbox=reg_decoded_bbox,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg)\n\n    def get_batch_gt_bboxes(self, gt_bboxes_list, num_images, gt_nums, device,\n                            max_gt_labels):\n        \"\"\"Get ground truth bboxes of all image.\n\n        Args:\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            num_images (int): The num of images.\n            gt_nums(list[int]): The ground truth bboxes num of each image.\n            device (torch.device | str): Device for returned tensors\n    
         max_gt_labels(int): The max ground truth bboxes num of all images.\n        Returns:\n            batch_gt_bboxes (Tensor): Ground truth bboxes of all images.\n        \"\"\"\n        # a static tensor of ground truth bboxes.\n        # Save static gt. Related to Ascend. Helps improve performance\n        if not hasattr(self, 'batch_gt_bboxes'):\n            self.batch_gt_bboxes = {}\n        # a min anchor to fill the excess anchors\n        if not hasattr(self, 'min_anchor'):\n            self.min_anchor = (-1354, -1344)\n        if gt_bboxes_list is None:\n            batch_gt_bboxes = None\n        else:\n            if self.batch_gt_bboxes.get(max_gt_labels) is None:\n                batch_gt_bboxes = torch.zeros((num_images, max_gt_labels, 4),\n                                              dtype=gt_bboxes_list[0].dtype,\n                                              device=device)\n                batch_gt_bboxes[:, :, :2] = self.min_anchor[0]\n                batch_gt_bboxes[:, :, 2:] = self.min_anchor[1]\n                self.batch_gt_bboxes[max_gt_labels] = batch_gt_bboxes.clone()\n            else:\n                batch_gt_bboxes = self.batch_gt_bboxes.get(\n                    max_gt_labels).clone()\n            for index_imgs, gt_bboxes in enumerate(gt_bboxes_list):\n                batch_gt_bboxes[index_imgs, :gt_nums[index_imgs]] = gt_bboxes\n        return batch_gt_bboxes\n\n    def get_batch_gt_bboxes_ignore(self, gt_bboxes_ignore_list, num_images,\n                                   gt_nums, device):\n        \"\"\"Get ground truth bboxes to be ignored of all images.\n\n        Args:\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            num_images (int): The number of images.\n            gt_nums(list[int]): The ground truth bboxes num of each image.\n            device (torch.device | str): Device for returned tensors\n        Returns:\n            batch_gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored of all images.\n        \"\"\"\n        # TODO: support gt_bboxes_ignore_list\n        if gt_bboxes_ignore_list is None:\n            batch_gt_bboxes_ignore = None\n        else:\n            raise RuntimeError('gt_bboxes_ignore is not supported yet')\n        return batch_gt_bboxes_ignore\n\n    def get_batch_gt_labels(self, gt_labels_list, num_images, gt_nums, device,\n                            max_gt_labels):\n        \"\"\"Get ground truth labels of all images.\n\n        Args:\n            gt_labels_list (list[Tensor]): Ground truth labels.\n            num_images (int): The number of images.\n            gt_nums(list[int]): The ground truth bboxes num of each image.\n            device (torch.device | str): Device for returned tensors\n        Returns:\n            batch_gt_labels (Tensor): Ground truth labels of all images.\n        \"\"\"\n        if gt_labels_list is None:\n            batch_gt_labels = None\n        else:\n            batch_gt_labels = torch.zeros((num_images, max_gt_labels),\n                                          dtype=gt_labels_list[0].dtype,\n                                          device=device)\n            for index_imgs, gt_labels in enumerate(gt_labels_list):\n                batch_gt_labels[index_imgs, :gt_nums[index_imgs]] = gt_labels\n\n        return batch_gt_labels\n\n    def _get_targets_concat(self,\n                            batch_anchors,\n                            batch_valid_flags,\n                            batch_gt_bboxes,\n            
                batch_gt_bboxes_ignore,\n                            batch_gt_labels,\n                            img_metas,\n                            label_channels=1,\n                            unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in all\n        images.\n\n        Args:\n            batch_anchors (Tensor): anchors of all image, which are\n                concatenated into a single tensor of\n                shape (num_imgs, num_anchors ,4).\n            batch_valid_flags (Tensor): valid flags of all image,\n                which are concatenated into a single tensor of\n                    shape (num_imgs, num_anchors,).\n            batch_gt_bboxes (Tensor): Ground truth bboxes of all image,\n                shape (num_imgs, max_gt_nums, 4).\n            batch_gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_imgs, num_ignored_gts, 4).\n            batch_gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_imgs, max_gt_nums,).\n            img_metas (list[dict]): Meta info of each image.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple:\n                batch_labels (Tensor): Labels of all level\n                batch_label_weights (Tensor): Label weights of all level\n                batch_bbox_targets (Tensor): BBox targets of all level\n                batch_bbox_weights (Tensor): BBox weights of all level\n                batch_pos_mask (Tensor): Positive samples mask in all images\n                batch_neg_mask (Tensor): Negative samples mask in all images\n                sampling_result (Sampling): The result of sampling,\n                    default: None.\n        \"\"\"\n        num_imgs, num_anchors, _ = batch_anchors.size()\n        # assign gt and sample batch_anchors\n        assign_result = self.assigner.assign(\n            batch_anchors,\n            batch_gt_bboxes,\n            batch_gt_bboxes_ignore,\n            None if self.sampling else batch_gt_labels,\n            batch_bboxes_ignore_mask=batch_valid_flags)\n        # TODO: support sampling_result\n        sampling_result = None\n        batch_pos_mask = assign_result.batch_pos_mask\n        batch_neg_mask = assign_result.batch_neg_mask\n        batch_anchor_gt_indes = assign_result.batch_anchor_gt_indes\n        batch_anchor_gt_labels = assign_result.batch_anchor_gt_labels\n\n        batch_anchor_gt_bboxes = torch.zeros(\n            batch_anchors.size(),\n            dtype=batch_anchors.dtype,\n            device=batch_anchors.device)\n        for index_imgs in range(num_imgs):\n            batch_anchor_gt_bboxes[index_imgs] = torch.index_select(\n                batch_gt_bboxes[index_imgs], 0,\n                batch_anchor_gt_indes[index_imgs])\n\n        batch_bbox_targets = torch.zeros_like(batch_anchors)\n        batch_bbox_weights = torch.zeros_like(batch_anchors)\n        batch_labels = batch_anchors.new_full((num_imgs, num_anchors),\n                                              self.num_classes,\n                                              dtype=torch.int)\n        batch_label_weights = batch_anchors.new_zeros((num_imgs, num_anchors),\n                                                      dtype=torch.float)\n\n        if not self.reg_decoded_bbox:\n            batch_pos_bbox_targets = self.bbox_coder.encode(\n         
       batch_anchors, batch_anchor_gt_bboxes)\n        else:\n            batch_pos_bbox_targets = batch_anchor_gt_bboxes\n\n        batch_bbox_targets = masked_fill(batch_bbox_targets,\n                                         batch_pos_mask.unsqueeze(2),\n                                         batch_pos_bbox_targets)\n        batch_bbox_weights = masked_fill(batch_bbox_weights,\n                                         batch_pos_mask.unsqueeze(2), 1.0)\n        if batch_gt_labels is None:\n            batch_labels = masked_fill(batch_labels, batch_pos_mask, 0.0)\n        else:\n            batch_labels = masked_fill(batch_labels, batch_pos_mask,\n                                       batch_anchor_gt_labels)\n        if self.train_cfg.pos_weight <= 0:\n            batch_label_weights = masked_fill(batch_label_weights,\n                                              batch_pos_mask, 1.0)\n        else:\n            batch_label_weights = masked_fill(batch_label_weights,\n                                              batch_pos_mask,\n                                              self.train_cfg.pos_weight)\n        batch_label_weights = masked_fill(batch_label_weights, batch_neg_mask,\n                                          1.0)\n        return (batch_labels, batch_label_weights, batch_bbox_targets,\n                batch_bbox_weights, batch_pos_mask, batch_neg_mask,\n                sampling_result)\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True,\n                    return_sampling_results=False,\n                    return_level=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n            return_sampling_results (bool): Whether to return the result of\n                sample.\n            return_level (bool): Whether to map outputs back to the levels\n                of feature map sizes.\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - num_total_pos (int): Number of positive samples in all\n                  images.\n                - num_total_neg (int): Number of negative samples in all\n                  images.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end\n        \"\"\"\n        assert gt_bboxes_ignore_list is None\n        assert unmap_outputs is True\n        assert return_sampling_results is False\n        assert self.train_cfg.allowed_border < 0\n        assert isinstance(self.assigner, AscendMaxIoUAssigner)\n        assert isinstance(self.sampler, PseudoSampler)\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        device = anchor_list[0][0].device\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        batch_anchor_list = []\n        batch_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            batch_anchor_list.append(torch.cat(anchor_list[i]))\n            batch_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n        batch_anchors = torch.cat(\n            [torch.unsqueeze(anchor, 0) for anchor in batch_anchor_list], 0)\n        batch_valid_flags = torch.cat([\n            torch.unsqueeze(batch_valid_flag, 0)\n            for batch_valid_flag in batch_valid_flag_list\n        ], 0)\n\n        gt_nums = [len(gt_bbox) for gt_bbox in gt_bboxes_list]\n        max_gt_nums = get_max_num_gt_division_factor(gt_nums)\n        batch_gt_bboxes = self.get_batch_gt_bboxes(gt_bboxes_list, num_imgs,\n                                                   gt_nums, device,\n                                                   max_gt_nums)\n        batch_gt_bboxes_ignore = self.get_batch_gt_bboxes_ignore(\n            gt_bboxes_ignore_list, num_imgs, gt_nums, device)\n        batch_gt_labels = self.get_batch_gt_labels(gt_labels_list, num_imgs,\n                                                   gt_nums, device,\n                                                   max_gt_nums)\n\n        
results = self._get_targets_concat(\n            batch_anchors,\n            batch_valid_flags,\n            batch_gt_bboxes,\n            batch_gt_bboxes_ignore,\n            batch_gt_labels,\n            img_metas,\n            label_channels=label_channels,\n            unmap_outputs=unmap_outputs)\n\n        (batch_labels, batch_label_weights, batch_bbox_targets,\n         batch_bbox_weights, batch_pos_mask, batch_neg_mask,\n         sampling_result) = results[:7]\n        rest_results = list(results[7:])  # user-added return values\n\n        # sampled anchors of all images\n        min_num = torch.ones((num_imgs, ),\n                             dtype=torch.long,\n                             device=batch_pos_mask.device)\n        num_total_pos = torch.sum(\n            torch.max(torch.sum(batch_pos_mask, dim=1), min_num))\n        num_total_neg = torch.sum(\n            torch.max(torch.sum(batch_neg_mask, dim=1), min_num))\n        if return_level is True:\n            labels_list = batch_images_to_levels(batch_labels,\n                                                 num_level_anchors)\n            label_weights_list = batch_images_to_levels(\n                batch_label_weights, num_level_anchors)\n            bbox_targets_list = batch_images_to_levels(batch_bbox_targets,\n                                                       num_level_anchors)\n            bbox_weights_list = batch_images_to_levels(batch_bbox_weights,\n                                                       num_level_anchors)\n            res = (labels_list, label_weights_list, bbox_targets_list,\n                   bbox_weights_list, num_total_pos, num_total_neg)\n            if return_sampling_results:\n                res = res + (sampling_result, )\n            for i, r in enumerate(rest_results):  # user-added return values\n                rest_results[i] = batch_images_to_levels(r, num_level_anchors)\n\n            return res + tuple(rest_results)\n        else:\n            res = (batch_labels, batch_label_weights, batch_bbox_targets,\n                   batch_bbox_weights, batch_pos_mask, batch_neg_mask,\n                   sampling_result, num_total_pos, num_total_neg,\n                   batch_anchors)\n            return res\n"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import HEADS\nfrom .ascend_anchor_head import AscendAnchorHead\nfrom .retina_head import RetinaHead\n\n\n@HEADS.register_module()\nclass AscendRetinaHead(RetinaHead, AscendAnchorHead):\n    r\"\"\"An anchor-based head used in `RetinaNet\n    <https://arxiv.org/pdf/1708.02002.pdf>`_.\n\n    The head contains two subnetworks. The first classifies anchor boxes and\n    the second regresses deltas for the anchors.\n\n    Example:\n        >>> import torch\n        >>> self = RetinaHead(11, 7)\n        >>> x = torch.rand(1, 7, 32, 32)\n        >>> cls_score, bbox_pred = self.forward_single(x)\n        >>> # Each anchor predicts a score for each class except background\n        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors\n        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors\n        >>> assert cls_per_anchor == (self.num_classes)\n        >>> assert box_per_anchor == 4\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=4,\n                     scales_per_octave=3,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='retina_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        super(AscendRetinaHead, self).__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            stacked_convs=stacked_convs,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True,\n                    return_sampling_results=False,\n                    return_level=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n            return_sampling_results (bool): Whether to return the result of\n                sample.\n            return_level (bool): Whether to map outputs back to the levels\n                of feature map sizes.\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - num_total_pos (int): Number of positive samples in all\n                  images.\n                - num_total_neg (int): Number of negative samples in all\n                  images.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end\n        \"\"\"\n        return AscendAnchorHead.get_targets(\n            self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas,\n            gt_bboxes_ignore_list, gt_labels_list, label_channels,\n            unmap_outputs, return_sampling_results, return_level)\n"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.runner import force_fp32\n\nfrom ..builder import HEADS\nfrom ..losses import smooth_l1_loss\nfrom .ascend_anchor_head import AscendAnchorHead\nfrom .ssd_head import SSDHead\n\n\n@HEADS.register_module()\nclass AscendSSDHead(SSDHead, AscendAnchorHead):\n    \"\"\"Ascend SSD head used in https://arxiv.org/abs/1512.02325.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Default: 0.\n        feat_channels (int): Number of hidden channels when stacked_convs\n            > 0. Default: 256.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Default: False.\n        conv_cfg (dict): Dictionary to construct and config conv layer.\n            Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: None.\n        act_cfg (dict): Dictionary to construct and config activation layer.\n            Default: None.\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes=80,\n                 in_channels=(512, 1024, 512, 256, 256, 256),\n                 stacked_convs=0,\n                 feat_channels=256,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 anchor_generator=dict(\n                     type='SSDAnchorGenerator',\n                     scale_major=False,\n                     input_size=300,\n                     strides=[8, 16, 32, 64, 100, 300],\n                     ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n                     basesize_ratio_range=(0.1, 0.9)),\n                 bbox_coder=dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=[.0, .0, .0, .0],\n                     target_stds=[1.0, 1.0, 1.0, 1.0],\n                 ),\n                 reg_decoded_bbox=False,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier',\n                     layer='Conv2d',\n                     distribution='uniform',\n                     bias=0)):\n        super(AscendSSDHead, self).__init__(\n            num_classes=num_classes,\n            in_channels=in_channels,\n            stacked_convs=stacked_convs,\n            feat_channels=feat_channels,\n            use_depthwise=use_depthwise,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg,\n            
anchor_generator=anchor_generator,\n            bbox_coder=bbox_coder,\n            reg_decoded_bbox=reg_decoded_bbox,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg)\n        assert self.reg_decoded_bbox is False, \\\n            'reg_decoded_bbox only support False now.'\n\n    def get_static_anchors(self, featmap_sizes, img_metas, device='cuda'):\n        \"\"\"Get static anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            device (torch.device | str): Device for returned tensors\n\n        Returns:\n            tuple:\n                anchor_list (list[Tensor]): Anchors of each image.\n                valid_flag_list (list[Tensor]): Valid flags of each image.\n        \"\"\"\n        if not hasattr(self, 'static_anchors') or \\\n                not hasattr(self, 'static_valid_flags'):\n            static_anchors, static_valid_flags = self.get_anchors(\n                featmap_sizes, img_metas, device)\n            self.static_anchors = static_anchors\n            self.static_valid_flags = static_valid_flags\n        return self.static_anchors, self.static_valid_flags\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True,\n                    return_sampling_results=False,\n                    return_level=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n            return_sampling_results (bool): Whether to return the result of\n                sample.\n            return_level (bool): Whether to map outputs back to the levels\n                of feature map sizes.\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - bbox_weights_list (list[Tensor]): BBox weights of each level.\n                - num_total_pos (int): Number of positive samples in all\n                  images.\n                - num_total_neg (int): Number of negative samples in all\n                  images.\n\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end\n        \"\"\"\n        return AscendAnchorHead.get_targets(\n            self,\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes_list,\n            img_metas,\n            gt_bboxes_ignore_list,\n            gt_labels_list,\n            label_channels,\n            unmap_outputs,\n            return_sampling_results,\n            return_level,\n        )\n\n    def batch_loss(self, batch_cls_score, batch_bbox_pred, batch_anchor,\n                   batch_labels, batch_label_weights, batch_bbox_targets,\n                   batch_bbox_weights, batch_pos_mask, batch_neg_mask,\n                   num_total_samples):\n        \"\"\"Compute loss of all images.\n\n        Args:\n            batch_cls_score (Tensor): Box scores for all image\n                Has shape (num_imgs, num_total_anchors, num_classes).\n            batch_bbox_pred (Tensor): Box energies / deltas for all image\n                level with shape (num_imgs, num_total_anchors, 4).\n            batch_anchor (Tensor): Box reference for all image with shape\n                (num_imgs, num_total_anchors, 4).\n            batch_labels (Tensor): Labels of all anchors with shape\n                (num_imgs, num_total_anchors,).\n            batch_label_weights (Tensor): Label weights of all anchor with\n                shape (num_imgs, num_total_anchors,)\n            batch_bbox_targets (Tensor): BBox regression targets of all anchor\n                weight shape (num_imgs, num_total_anchors, 4).\n            batch_bbox_weights (Tensor): BBox regression loss weights of\n                all anchor with shape (num_imgs, num_total_anchors, 4).\n            batch_pos_mask (Tensor): Positive samples mask in all images.\n            batch_neg_mask (Tensor): negative samples mask in all images.\n            
num_total_samples (int): If sampling, num total samples equal to\n                the number of total anchors; Otherwise, it is the number of\n                positive anchors.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_images, num_anchors, _ = batch_anchor.size()\n\n        batch_loss_cls_all = F.cross_entropy(\n            batch_cls_score.view((-1, self.cls_out_channels)),\n            batch_labels.view(-1),\n            reduction='none').view(\n                batch_label_weights.size()) * batch_label_weights\n        # # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        batch_num_pos_samples = torch.sum(batch_pos_mask, dim=1)\n        batch_num_neg_samples = \\\n            self.train_cfg.neg_pos_ratio * batch_num_pos_samples\n\n        batch_num_neg_samples_max = torch.sum(batch_neg_mask, dim=1)\n        batch_num_neg_samples = torch.min(batch_num_neg_samples,\n                                          batch_num_neg_samples_max)\n\n        batch_topk_loss_cls_neg, _ = torch.topk(\n            batch_loss_cls_all * batch_neg_mask, k=num_anchors, dim=1)\n        batch_loss_cls_pos = torch.sum(\n            batch_loss_cls_all * batch_pos_mask, dim=1)\n\n        anchor_index = torch.arange(\n            end=num_anchors, dtype=torch.float,\n            device=batch_anchor.device).view((1, -1))\n        topk_loss_neg_mask = (anchor_index < batch_num_neg_samples.view(\n            -1, 1)).float()\n\n        batch_loss_cls_neg = torch.sum(\n            batch_topk_loss_cls_neg * topk_loss_neg_mask, dim=1)\n        loss_cls = \\\n            (batch_loss_cls_pos + batch_loss_cls_neg) / num_total_samples\n\n        if self.reg_decoded_bbox:\n            # TODO: support self.reg_decoded_bbox is True\n            raise RuntimeError\n\n        loss_bbox_all = smooth_l1_loss(\n            batch_bbox_pred,\n            batch_bbox_targets,\n            batch_bbox_weights,\n            reduction='none',\n            beta=self.train_cfg.smoothl1_beta,\n            avg_factor=num_total_samples)\n        eps = torch.finfo(torch.float32).eps\n\n        sum_dim = (i for i in range(1, len(loss_bbox_all.size())))\n        loss_bbox = loss_bbox_all.sum(tuple(sum_dim)) / (\n            num_total_samples + eps)\n        return loss_cls[None], loss_bbox\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = 
[featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=1,\n            unmap_outputs=True,\n            return_level=False)\n        if cls_reg_targets is None:\n            return None\n\n        (batch_labels, batch_label_weights, batch_bbox_targets,\n         batch_bbox_weights, batch_pos_mask, batch_neg_mask, sampling_result,\n         num_total_pos, num_total_neg, batch_anchors) = cls_reg_targets\n\n        num_imgs = len(img_metas)\n        batch_cls_score = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(num_imgs, -1, self.cls_out_channels)\n            for s in cls_scores\n        ], 1)\n\n        batch_bbox_pred = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) for b in bbox_preds\n        ], -2)\n\n        batch_losses_cls, batch_losses_bbox = self.batch_loss(\n            batch_cls_score, batch_bbox_pred, batch_anchors, batch_labels,\n            batch_label_weights, batch_bbox_targets, batch_bbox_weights,\n            batch_pos_mask, batch_neg_mask, num_total_pos)\n        losses_cls = [\n            batch_losses_cls[:, index_imgs] for index_imgs in range(num_imgs)\n        ]\n        losses_bbox = [losses_bbox for losses_bbox in batch_losses_bbox]\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n"
  },
  {
    "path": "mmdet/models/dense_heads/atss_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,\n                        images_to_levels, multi_apply, reduce_mean, unmap)\nfrom ..builder import HEADS, build_loss\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass ATSSHead(AnchorHead):\n    \"\"\"Bridging the Gap Between Anchor-based and Anchor-free Detection via\n    Adaptive Training Sample Selection.\n\n    ATSS head structure is similar with FCOS, however ATSS use anchor boxes\n    and assign label by Adaptive Training Sample Selection instead max-iou.\n\n    https://arxiv.org/abs/1912.02424\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 pred_kernel_size=3,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 reg_decoded_bbox=True,\n                 loss_centerness=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='atss_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.pred_kernel_size = pred_kernel_size\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super(ATSSHead, self).__init__(\n            num_classes,\n            in_channels,\n            reg_decoded_bbox=reg_decoded_bbox,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        self.sampling = False\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # SSD sampling=False so use PseudoSampler\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.loss_centerness = build_loss(loss_centerness)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        pred_pad_size = self.pred_kernel_size // 2\n        self.atss_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_anchors * self.cls_out_channels,\n            self.pred_kernel_size,\n            
padding=pred_pad_size)\n        self.atss_reg = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * 4,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.atss_centerness = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * 1,\n            self.pred_kernel_size,\n            padding=pred_pad_size)\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, feats, self.scales)\n\n    def forward_single(self, x, scale):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level\n                    the channels number is num_anchors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale\n                    level, the channels number is num_anchors * 4.\n                centerness (Tensor): Centerness for a single scale level, the\n                    channel number is (N, num_anchors * 1, H, W).\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.atss_cls(cls_feat)\n        # we just follow atss, not apply exp in bbox_pred\n        bbox_pred = scale(self.atss_reg(reg_feat)).float()\n        centerness = self.atss_centerness(reg_feat)\n        return cls_score, bbox_pred, centerness\n\n    def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,\n                    label_weights, bbox_targets, num_total_samples):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            num_total_samples (int): Number 
os positive samples that is\n                reduced over all GPUs.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        centerness = centerness.permute(0, 2, 3, 1).reshape(-1)\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # classification loss\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=num_total_samples)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_centerness = centerness[pos_inds]\n\n            centerness_targets = self.centerness_target(\n                pos_anchors, pos_bbox_targets)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_pred)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_bbox_targets,\n                weight=centerness_targets,\n                avg_factor=1.0)\n\n            # centerness loss\n            loss_centerness = self.loss_centerness(\n                pos_centerness,\n                centerness_targets,\n                avg_factor=num_total_samples)\n\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            loss_centerness = centerness.sum() * 0\n            centerness_targets = bbox_targets.new_tensor(0.)\n\n        return loss_cls, loss_bbox, loss_centerness, centerness_targets.sum()\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             centernesses,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            centernesses (list[Tensor]): Centerness for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == 
self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n\n        num_total_samples = reduce_mean(\n            torch.tensor(num_total_pos, dtype=torch.float,\n                         device=device)).item()\n        num_total_samples = max(num_total_samples, 1.0)\n\n        losses_cls, losses_bbox, loss_centerness,\\\n            bbox_avg_factor = multi_apply(\n                self.loss_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                centernesses,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                num_total_samples=num_total_samples)\n\n        bbox_avg_factor = sum(bbox_avg_factor)\n        bbox_avg_factor = reduce_mean(bbox_avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_centerness=loss_centerness)\n\n    def centerness_target(self, anchors, gts):\n        # only calculate pos centerness targets, otherwise there may be nan\n        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n        l_ = anchors_cx - gts[:, 0]\n        t_ = anchors_cy - gts[:, 1]\n        r_ = gts[:, 2] - anchors_cx\n        b_ = gts[:, 3] - anchors_cy\n\n        left_right = torch.stack([l_, r_], dim=1)\n        top_bottom = torch.stack([t_, b_], dim=1)\n        centerness = torch.sqrt(\n            (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *\n            (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))\n        assert not torch.isnan(centerness).any()\n        return centerness\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True):\n        \"\"\"Get targets for ATSS head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. 
Besides\n        returning the targets as the parent method does, it also returns the\n        anchors as the first element of the returned tuple.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single,\n             anchor_list,\n             valid_flag_list,\n             num_level_anchors_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             label_channels=label_channels,\n             unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, num_total_pos,\n                num_total_neg)\n\n    def _get_target_single(self,\n                           flat_anchors,\n                           valid_flags,\n                           num_level_anchors,\n                           gt_bboxes,\n                           gt_bboxes_ignore,\n                           gt_labels,\n                           img_meta,\n                           label_channels=1,\n                           unmap_outputs=True):\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            num_level_anchors Tensor): Number of anchors of each scale level.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            img_meta (dict): Meta info of the image.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                labels (Tensor): Labels of all anchors in the image with shape\n                    (N,).\n                label_weights (Tensor): Label weights of all anchor in the\n                    image with shape (N,).\n                bbox_targets (Tensor): BBox targets of all anchors in the\n                    image with shape (N, 4).\n                bbox_weights (Tensor): BBox weights of all anchors in the\n                    image with shape (N, 4)\n                pos_inds (Tensor): Indices of positive anchor with shape\n                    (num_pos,).\n                neg_inds (Tensor): Indices of negative anchor with shape\n                    (num_neg,).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            num_level_anchors, inside_flags)\n        assign_result = self.assigner.assign(anchors, 
num_level_anchors_inside,\n                                             gt_bboxes, gt_bboxes_ignore,\n                                             gt_labels)\n\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if self.reg_decoded_bbox:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            else:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class since v2.5.0\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds)\n\n    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
  {
    "path": "mmdet/models/dense_heads/autoassign_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import bias_init_with_prob, normal_init\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply\nfrom mmdet.core.anchor.point_generator import MlvlPointGenerator\nfrom mmdet.core.bbox import bbox_overlaps\nfrom mmdet.models import HEADS\nfrom mmdet.models.dense_heads.atss_head import reduce_mean\nfrom mmdet.models.dense_heads.fcos_head import FCOSHead\nfrom mmdet.models.dense_heads.paa_head import levels_to_images\n\nEPS = 1e-12\n\n\nclass CenterPrior(nn.Module):\n    \"\"\"Center Weighting module to adjust the category-specific prior\n    distributions.\n\n    Args:\n        force_topk (bool): When no point falls into gt_bbox, forcibly\n            select the k points closest to the center to calculate\n            the center prior. Defaults to False.\n        topk (int): The number of points used to calculate the\n            center prior when no point falls in gt_bbox. Only work when\n            force_topk if True. Defaults to 9.\n        num_classes (int): The class number of dataset. Defaults to 80.\n        strides (tuple[int]): The stride of each input feature map. Defaults\n            to (8, 16, 32, 64, 128).\n    \"\"\"\n\n    def __init__(self,\n                 force_topk=False,\n                 topk=9,\n                 num_classes=80,\n                 strides=(8, 16, 32, 64, 128)):\n        super(CenterPrior, self).__init__()\n        self.mean = nn.Parameter(torch.zeros(num_classes, 2))\n        self.sigma = nn.Parameter(torch.ones(num_classes, 2))\n        self.strides = strides\n        self.force_topk = force_topk\n        self.topk = topk\n\n    def forward(self, anchor_points_list, gt_bboxes, labels,\n                inside_gt_bbox_mask):\n        \"\"\"Get the center prior of each point on the feature map for each\n        instance.\n\n        Args:\n            anchor_points_list (list[Tensor]): list of coordinate\n                of points on feature map. Each with shape\n                (num_points, 2).\n            gt_bboxes (Tensor): The gt_bboxes with shape of\n                (num_gt, 4).\n            labels (Tensor): The gt_labels with shape of (num_gt).\n            inside_gt_bbox_mask (Tensor): Tensor of bool type,\n                with shape of (num_points, num_gt), each\n                value is used to mark whether this point falls\n                within a certain gt.\n\n        Returns:\n            tuple(Tensor):\n\n                - center_prior_weights(Tensor): Float tensor with shape \\\n                    of (num_points, num_gt). 
Each value represents \\\n                    the center weighting coefficient.\n                - inside_gt_bbox_mask (Tensor): Tensor of bool type, \\\n                    with shape of (num_points, num_gt), each \\\n                    value is used to mark whether this point falls \\\n                    within a certain gt or is the topk nearest points for \\\n                    a specific gt_bbox.\n        \"\"\"\n        inside_gt_bbox_mask = inside_gt_bbox_mask.clone()\n        num_gts = len(labels)\n        num_points = sum([len(item) for item in anchor_points_list])\n        if num_gts == 0:\n            return gt_bboxes.new_zeros(num_points,\n                                       num_gts), inside_gt_bbox_mask\n        center_prior_list = []\n        for slvl_points, stride in zip(anchor_points_list, self.strides):\n            # slvl_points: points from single level in FPN, has shape (h*w, 2)\n            # single_level_points has shape (h*w, num_gt, 2)\n            single_level_points = slvl_points[:, None, :].expand(\n                (slvl_points.size(0), len(gt_bboxes), 2))\n            gt_center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2)\n            gt_center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2)\n            gt_center = torch.stack((gt_center_x, gt_center_y), dim=1)\n            gt_center = gt_center[None]\n            # instance_center has shape (1, num_gt, 2)\n            instance_center = self.mean[labels][None]\n            # instance_sigma has shape (1, num_gt, 2)\n            instance_sigma = self.sigma[labels][None]\n            # distance has shape (num_points, num_gt, 2)\n            distance = (((single_level_points - gt_center) / float(stride) -\n                         instance_center)**2)\n            center_prior = torch.exp(-distance /\n                                     (2 * instance_sigma**2)).prod(dim=-1)\n            center_prior_list.append(center_prior)\n        center_prior_weights = torch.cat(center_prior_list, dim=0)\n\n        if self.force_topk:\n            gt_inds_no_points_inside = torch.nonzero(\n                inside_gt_bbox_mask.sum(0) == 0).reshape(-1)\n            if gt_inds_no_points_inside.numel():\n                topk_center_index = \\\n                    center_prior_weights[:, gt_inds_no_points_inside].topk(\n                                                             self.topk,\n                                                             dim=0)[1]\n                temp_mask = inside_gt_bbox_mask[:, gt_inds_no_points_inside]\n                inside_gt_bbox_mask[:, gt_inds_no_points_inside] = \\\n                    torch.scatter(temp_mask,\n                                  dim=0,\n                                  index=topk_center_index,\n                                  src=torch.ones_like(\n                                    topk_center_index,\n                                    dtype=torch.bool))\n\n        center_prior_weights[~inside_gt_bbox_mask] = 0\n        return center_prior_weights, inside_gt_bbox_mask\n\n\n@HEADS.register_module()\nclass AutoAssignHead(FCOSHead):\n    \"\"\"AutoAssignHead head used in AutoAssign.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2007.03496>`_ .\n\n    Args:\n        force_topk (bool): Used in center prior initialization to\n            handle extremely small gt. Default is False.\n        topk (int): The number of points used to calculate the\n            center prior when no point falls in gt_bbox. 
Only work when\n            force_topk if True. Defaults to 9.\n        pos_loss_weight (float): The loss weight of positive loss\n            and with default value 0.25.\n        neg_loss_weight (float): The loss weight of negative loss\n            and with default value 0.75.\n        center_loss_weight (float): The loss weight of center prior\n            loss and with default value 0.75.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 force_topk=False,\n                 topk=9,\n                 pos_loss_weight=0.25,\n                 neg_loss_weight=0.75,\n                 center_loss_weight=0.75,\n                 **kwargs):\n        super().__init__(*args, conv_bias=True, **kwargs)\n        self.center_prior = CenterPrior(\n            force_topk=force_topk,\n            topk=topk,\n            num_classes=self.num_classes,\n            strides=self.strides)\n        self.pos_loss_weight = pos_loss_weight\n        self.neg_loss_weight = neg_loss_weight\n        self.center_loss_weight = center_loss_weight\n        self.prior_generator = MlvlPointGenerator(self.strides, offset=0)\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\n\n        In particular, we have special initialization for classified conv's and\n        regression conv's bias\n        \"\"\"\n\n        super(AutoAssignHead, self).init_weights()\n        bias_cls = bias_init_with_prob(0.02)\n        normal_init(self.conv_cls, std=0.01, bias=bias_cls)\n        normal_init(self.conv_reg, std=0.01, bias=4.0)\n\n    def forward_single(self, x, scale, stride):\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps, only\n                used to normalize the bbox prediction when self.norm_on_bbox\n                is True.\n\n        Returns:\n            tuple: scores for each class, bbox predictions and centerness \\\n                predictions of input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, cls_feat, reg_feat = super(\n            FCOSHead, self).forward_single(x)\n        centerness = self.conv_centerness(reg_feat)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        # bbox_pred needed for gradient computation has been modified\n        # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n        # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n        bbox_pred = bbox_pred.clamp(min=0)\n        bbox_pred *= stride\n        return cls_score, bbox_pred, centerness\n\n    def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels,\n                            center_prior_weights):\n        \"\"\"Calculate the positive loss of all points in gt_bboxes.\n\n        Args:\n            cls_score (Tensor): All category scores for each point on\n                the feature map. 
The shape is (num_points, num_class).\n            objectness (Tensor): Foreground probability of all points,\n                has shape (num_points, 1).\n            reg_loss (Tensor): The regression loss of each gt_bbox and each\n                prediction box, has shape of (num_points, num_gt).\n            gt_labels (Tensor): The zeros based gt_labels of all gt\n                with shape of (num_gt,).\n            center_prior_weights (Tensor): Float tensor with shape\n                of (num_points, num_gt). Each value represents\n                the center weighting coefficient.\n\n        Returns:\n            tuple[Tensor]:\n\n                - pos_loss (Tensor): The positive loss of all points\n                  in the gt_bboxes.\n        \"\"\"\n        # p_loc: localization confidence\n        p_loc = torch.exp(-reg_loss)\n        # p_cls: classification confidence\n        p_cls = (cls_score * objectness)[:, gt_labels]\n        # p_pos: joint confidence indicator\n        p_pos = p_cls * p_loc\n\n        # 3 is a hyper-parameter to control the contributions of high and\n        # low confidence locations towards positive losses.\n        confidence_weight = torch.exp(p_pos * 3)\n        p_pos_weight = (confidence_weight * center_prior_weights) / (\n            (confidence_weight * center_prior_weights).sum(\n                0, keepdim=True)).clamp(min=EPS)\n        reweighted_p_pos = (p_pos * p_pos_weight).sum(0)\n        pos_loss = F.binary_cross_entropy(\n            reweighted_p_pos,\n            torch.ones_like(reweighted_p_pos),\n            reduction='none')\n        pos_loss = pos_loss.sum() * self.pos_loss_weight\n        return pos_loss,\n\n    def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious,\n                            inside_gt_bbox_mask):\n        \"\"\"Calculate the negative loss of all points in feature map.\n\n        Args:\n            cls_score (Tensor): All category scores for each point on\n                the feature map. 
The shape is (num_points, num_class).\n            objectness (Tensor): Foreground probability of all points\n                and is shape of (num_points, 1).\n            gt_labels (Tensor): The zeros based label of all gt with shape of\n                (num_gt).\n            ious (Tensor): Float tensor with shape of (num_points, num_gt).\n                Each value represent the iou of pred_bbox and gt_bboxes.\n            inside_gt_bbox_mask (Tensor): Tensor of bool type,\n                with shape of (num_points, num_gt), each\n                value is used to mark whether this point falls\n                within a certain gt.\n\n        Returns:\n            tuple[Tensor]:\n\n                - neg_loss (Tensor): The negative loss of all points\n                  in the feature map.\n        \"\"\"\n        num_gts = len(gt_labels)\n        joint_conf = (cls_score * objectness)\n        p_neg_weight = torch.ones_like(joint_conf)\n        if num_gts > 0:\n            # the order of dinmension would affect the value of\n            # p_neg_weight, we strictly follow the original\n            # implementation.\n            inside_gt_bbox_mask = inside_gt_bbox_mask.permute(1, 0)\n            ious = ious.permute(1, 0)\n\n            foreground_idxs = torch.nonzero(inside_gt_bbox_mask, as_tuple=True)\n            temp_weight = (1 / (1 - ious[foreground_idxs]).clamp_(EPS))\n\n            def normalize(x):\n                return (x - x.min() + EPS) / (x.max() - x.min() + EPS)\n\n            for instance_idx in range(num_gts):\n                idxs = foreground_idxs[0] == instance_idx\n                if idxs.any():\n                    temp_weight[idxs] = normalize(temp_weight[idxs])\n\n            p_neg_weight[foreground_idxs[1],\n                         gt_labels[foreground_idxs[0]]] = 1 - temp_weight\n\n        logits = (joint_conf * p_neg_weight)\n        neg_loss = (\n            logits**2 * F.binary_cross_entropy(\n                logits, torch.zeros_like(logits), reduction='none'))\n        neg_loss = neg_loss.sum() * self.neg_loss_weight\n        return neg_loss,\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             objectnesses,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            objectnesses (list[Tensor]): objectness for each scale level, each\n                is a 4D-tensor, the channel number is num_points * 1.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n 
       assert len(cls_scores) == len(bbox_preds) == len(objectnesses)\n        all_num_gt = sum([len(item) for item in gt_bboxes])\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        inside_gt_bbox_mask_list, bbox_targets_list = self.get_targets(\n            all_level_points, gt_bboxes)\n\n        center_prior_weight_list = []\n        temp_inside_gt_bbox_mask_list = []\n        for gt_bboxe, gt_label, inside_gt_bbox_mask in zip(\n                gt_bboxes, gt_labels, inside_gt_bbox_mask_list):\n            center_prior_weight, inside_gt_bbox_mask = \\\n                self.center_prior(all_level_points, gt_bboxe, gt_label,\n                                  inside_gt_bbox_mask)\n            center_prior_weight_list.append(center_prior_weight)\n            temp_inside_gt_bbox_mask_list.append(inside_gt_bbox_mask)\n        inside_gt_bbox_mask_list = temp_inside_gt_bbox_mask_list\n        mlvl_points = torch.cat(all_level_points, dim=0)\n        bbox_preds = levels_to_images(bbox_preds)\n        cls_scores = levels_to_images(cls_scores)\n        objectnesses = levels_to_images(objectnesses)\n\n        reg_loss_list = []\n        ious_list = []\n        num_points = len(mlvl_points)\n\n        for bbox_pred, encoded_targets, inside_gt_bbox_mask in zip(\n                bbox_preds, bbox_targets_list, inside_gt_bbox_mask_list):\n            temp_num_gt = encoded_targets.size(1)\n            expand_mlvl_points = mlvl_points[:, None, :].expand(\n                num_points, temp_num_gt, 2).reshape(-1, 2)\n            encoded_targets = encoded_targets.reshape(-1, 4)\n            expand_bbox_pred = bbox_pred[:, None, :].expand(\n                num_points, temp_num_gt, 4).reshape(-1, 4)\n            decoded_bbox_preds = self.bbox_coder.decode(\n                expand_mlvl_points, expand_bbox_pred)\n            decoded_target_preds = self.bbox_coder.decode(\n                expand_mlvl_points, encoded_targets)\n            with torch.no_grad():\n                ious = bbox_overlaps(\n                    decoded_bbox_preds, decoded_target_preds, is_aligned=True)\n                ious = ious.reshape(num_points, temp_num_gt)\n                if temp_num_gt:\n                    ious = ious.max(\n                        dim=-1, keepdim=True).values.repeat(1, temp_num_gt)\n                else:\n                    ious = ious.new_zeros(num_points, temp_num_gt)\n                ious[~inside_gt_bbox_mask] = 0\n                ious_list.append(ious)\n            loss_bbox = self.loss_bbox(\n                decoded_bbox_preds,\n                decoded_target_preds,\n                weight=None,\n                reduction_override='none')\n            reg_loss_list.append(loss_bbox.reshape(num_points, temp_num_gt))\n\n        cls_scores = [item.sigmoid() for item in cls_scores]\n        objectnesses = [item.sigmoid() for item in objectnesses]\n        pos_loss_list, = multi_apply(self.get_pos_loss_single, cls_scores,\n                                     objectnesses, reg_loss_list, gt_labels,\n                                     center_prior_weight_list)\n        pos_avg_factor = reduce_mean(\n            bbox_pred.new_tensor(all_num_gt)).clamp_(min=1)\n        pos_loss = sum(pos_loss_list) / pos_avg_factor\n\n        neg_loss_list, = multi_apply(self.get_neg_loss_single, cls_scores,\n                        
             objectnesses, gt_labels, ious_list,\n                                     inside_gt_bbox_mask_list)\n        neg_avg_factor = sum(item.data.sum()\n                             for item in center_prior_weight_list)\n        neg_avg_factor = reduce_mean(neg_avg_factor).clamp_(min=1)\n        neg_loss = sum(neg_loss_list) / neg_avg_factor\n\n        center_loss = []\n        for i in range(len(img_metas)):\n\n            if inside_gt_bbox_mask_list[i].any():\n                center_loss.append(\n                    len(gt_bboxes[i]) /\n                    center_prior_weight_list[i].sum().clamp_(min=EPS))\n            # when width or height of gt_bbox is smaller than stride of p3\n            else:\n                center_loss.append(center_prior_weight_list[i].sum() * 0)\n\n        center_loss = torch.stack(center_loss).mean() * self.center_loss_weight\n\n        # avoid dead lock in DDP\n        if all_num_gt == 0:\n            pos_loss = bbox_preds[0].sum() * 0\n            dummy_center_prior_loss = self.center_prior.mean.sum(\n            ) * 0 + self.center_prior.sigma.sum() * 0\n            center_loss = objectnesses[0].sum() * 0 + dummy_center_prior_loss\n\n        loss = dict(\n            loss_pos=pos_loss, loss_neg=neg_loss, loss_center=center_loss)\n\n        return loss\n\n    def get_targets(self, points, gt_bboxes_list):\n        \"\"\"Compute regression targets and each point inside or outside gt_bbox\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of all fpn level, each has shape\n                (num_points, 2).\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n\n        Returns:\n            tuple(list[Tensor]):\n\n                - inside_gt_bbox_mask_list (list[Tensor]): Each\n                  Tensor is with bool type and shape of\n                  (num_points, num_gt), each value\n                  is used to mark whether this point falls\n                  within a certain gt.\n                - concat_lvl_bbox_targets (list[Tensor]): BBox\n                  targets of each level. 
Each tensor has shape\n                  (num_points, num_gt, 4).\n        \"\"\"\n\n        concat_points = torch.cat(points, dim=0)\n        # the number of points per img, per lvl\n        inside_gt_bbox_mask_list, bbox_targets_list = multi_apply(\n            self._get_target_single, gt_bboxes_list, points=concat_points)\n        return inside_gt_bbox_mask_list, bbox_targets_list\n\n    def _get_target_single(self, gt_bboxes, points):\n        \"\"\"Compute regression targets and each point inside or outside gt_bbox\n        for a single image.\n\n        Args:\n            gt_bboxes (Tensor): gt_bbox of single image, has shape\n                (num_gt, 4).\n            points (Tensor): Points of all fpn level, has shape\n                (num_points, 2).\n\n        Returns:\n            tuple[Tensor]: Containing the following Tensors:\n\n                - inside_gt_bbox_mask (Tensor): Bool tensor with shape\n                  (num_points, num_gt), each value is used to mark\n                  whether this point falls within a certain gt.\n                - bbox_targets (Tensor): BBox targets of each point with\n                  each gt_bbox, has shape (num_points, num_gt, 4).\n        \"\"\"\n        num_points = points.size(0)\n        num_gts = gt_bboxes.size(0)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        xs, ys = points[:, 0], points[:, 1]\n        xs = xs[:, None]\n        ys = ys[:, None]\n        left = xs - gt_bboxes[..., 0]\n        right = gt_bboxes[..., 2] - xs\n        top = ys - gt_bboxes[..., 1]\n        bottom = gt_bboxes[..., 3] - ys\n        bbox_targets = torch.stack((left, top, right, bottom), -1)\n        if num_gts:\n            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n        else:\n            inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts),\n                                                         dtype=torch.bool)\n\n        return inside_gt_bbox_mask, bbox_targets\n\n    def _get_points_single(self,\n                           featmap_size,\n                           stride,\n                           dtype,\n                           device,\n                           flatten=False):\n        \"\"\"Almost the same as the implementation in FCOS; we remove the half\n        stride offset to align with the original implementation.\n\n        This function will be deprecated soon.\n        \"\"\"\n        warnings.warn(\n            '`_get_points_single` in `AutoAssignHead` will be '\n            'deprecated soon. We now support a multi-level point generator; '\n            'you can get points of a single level feature map '\n            'with `self.prior_generator.single_level_grid_priors`.')\n        y, x = super(FCOSHead,\n                     self)._get_points_single(featmap_size, stride, dtype,\n                                              device)\n        points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),\n                             dim=-1)\n        return points\n"
  },
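The LTRB target construction in `AutoAssignHead._get_target_single` above is easy to sanity-check in isolation. Below is a minimal, self-contained sketch of the same computation in plain PyTorch (not the mmdet API); the function name and the toy `pts`/`gts` values are made up for illustration.

```python
import torch


def ltrb_targets_and_inside_mask(points, gt_bboxes):
    # points: (num_points, 2) xy locations; gt_bboxes: (num_gt, 4) as (tl_x, tl_y, br_x, br_y)
    num_points, num_gts = points.size(0), gt_bboxes.size(0)
    gt = gt_bboxes[None].expand(num_points, num_gts, 4)
    xs = points[:, 0][:, None]
    ys = points[:, 1][:, None]
    # signed distances from each point to the four box edges
    left = xs - gt[..., 0]
    top = ys - gt[..., 1]
    right = gt[..., 2] - xs
    bottom = gt[..., 3] - ys
    bbox_targets = torch.stack((left, top, right, bottom), dim=-1)  # (num_points, num_gt, 4)
    if num_gts:
        inside_gt_bbox_mask = bbox_targets.min(dim=-1).values > 0
    else:
        inside_gt_bbox_mask = bbox_targets.new_zeros((num_points, num_gts), dtype=torch.bool)
    return inside_gt_bbox_mask, bbox_targets


# toy check with made-up values
pts = torch.tensor([[8., 8.], [40., 40.]])
gts = torch.tensor([[0., 0., 16., 16.]])
mask, targets = ltrb_targets_and_inside_mask(pts, gts)
# mask -> [[True], [False]]; targets[0, 0] -> [8., 8., 8., 8.]
```

A point only counts as inside a box when all four distances are strictly positive, so a gt box whose width or height is smaller than the P3 stride can produce an all-False mask; that is the case the `loss` method above handles with the zero-valued center-loss fallback.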
  {
    "path": "mmdet/models/dense_heads/base_dense_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom mmcv.cnn.utils.weight_init import constant_init\nfrom mmcv.ops import batched_nms\nfrom mmcv.runner import BaseModule, force_fp32\n\nfrom mmdet.core.utils import filter_scores_and_topk, select_single_mlvl\n\n\nclass BaseDenseHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for DenseHeads.\"\"\"\n\n    def __init__(self, init_cfg=None):\n        super(BaseDenseHead, self).__init__(init_cfg)\n\n    def init_weights(self):\n        super(BaseDenseHead, self).init_weights()\n        # avoid init_cfg overwrite the initialization of `conv_offset`\n        for m in self.modules():\n            # DeformConv2dPack, ModulatedDeformConv2dPack\n            if hasattr(m, 'conv_offset'):\n                constant_init(m.conv_offset, 0)\n\n    @abstractmethod\n    def loss(self, **kwargs):\n        \"\"\"Compute losses of the head.\"\"\"\n        pass\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def get_bboxes(self,\n                   cls_scores,\n                   bbox_preds,\n                   score_factors=None,\n                   img_metas=None,\n                   cfg=None,\n                   rescale=False,\n                   with_nms=True,\n                   **kwargs):\n        \"\"\"Transform network outputs of a batch into bbox results.\n\n        Note: When score_factors is not None, the cls_scores are\n        usually multiplied by it then obtain the real score used in NMS,\n        such as CenterNess in FCOS, IoU branch in ATSS.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            score_factors (list[Tensor], Optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 1, H, W). Default None.\n            img_metas (list[dict], Optional): Image meta info. Default None.\n            cfg (mmcv.Config, Optional): Test / postprocessing configuration,\n                if None, test_cfg would be used.  Default None.\n            rescale (bool): If True, return boxes in original image space.\n                Default False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default True.\n\n        Returns:\n            list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is an (n, 5) tensor, where the first 4 columns\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1. The second item is a\n                (n,) tensor where each item is the predicted class label of\n                the corresponding box.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        if score_factors is None:\n            # e.g. Retina, FreeAnchor, Foveabox, etc.\n            with_score_factors = False\n        else:\n            # e.g. 
FCOS, PAA, ATSS, AutoAssign, etc.\n            with_score_factors = True\n            assert len(cls_scores) == len(score_factors)\n\n        num_levels = len(cls_scores)\n\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device)\n\n        result_list = []\n\n        for img_id in range(len(img_metas)):\n            img_meta = img_metas[img_id]\n            cls_score_list = select_single_mlvl(cls_scores, img_id)\n            bbox_pred_list = select_single_mlvl(bbox_preds, img_id)\n            if with_score_factors:\n                score_factor_list = select_single_mlvl(score_factors, img_id)\n            else:\n                score_factor_list = [None for _ in range(num_levels)]\n\n            results = self._get_bboxes_single(cls_score_list, bbox_pred_list,\n                                              score_factor_list, mlvl_priors,\n                                              img_meta, cfg, rescale, with_nms,\n                                              **kwargs)\n            result_list.append(results)\n        return result_list\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. 
If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        if score_factor_list[0] is None:\n            # e.g. Retina, FreeAnchor, etc.\n            with_score_factors = False\n        else:\n            # e.g. FCOS, PAA, ATSS, etc.\n            with_score_factors = True\n\n        cfg = self.test_cfg if cfg is None else cfg\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        if with_score_factors:\n            mlvl_score_factors = []\n        else:\n            mlvl_score_factors = None\n        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, mlvl_priors)):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            if with_score_factors:\n                score_factor = score_factor.permute(1, 2,\n                                                    0).reshape(-1).sigmoid()\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                # remind that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            if with_score_factors:\n                score_factor = score_factor[keep_idxs]\n\n            bboxes = self.bbox_coder.decode(\n                priors, bbox_pred, max_shape=img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n            if with_score_factors:\n                mlvl_score_factors.append(score_factor)\n\n        return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,\n                                       img_meta['scale_factor'], cfg, rescale,\n                                       with_nms, mlvl_score_factors, **kwargs)\n\n    def _bbox_post_process(self,\n                           mlvl_scores,\n                           mlvl_labels,\n                           mlvl_bboxes,\n                           scale_factor,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           mlvl_score_factors=None,\n                           **kwargs):\n        \"\"\"bbox post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation. Usually `with_nms` is False is used for aug test.\n\n        Args:\n            mlvl_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_bboxes, ).\n            mlvl_labels (list[Tensor]): Box class labels from all scale\n                levels of a single image, each item has shape\n                (num_bboxes, ).\n            mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale\n                levels of a single image, each item has shape (num_bboxes, 4).\n            scale_factor (ndarray, optional): Scale factor of the image arange\n                as (w_scale, h_scale, w_scale, h_scale).\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n            mlvl_score_factors (list[Tensor], optional): Score factor from\n                all scale levels of a single image, each item has shape\n                (num_bboxes, ). Default: None.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. 
If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        assert len(mlvl_scores) == len(mlvl_bboxes) == len(mlvl_labels)\n\n        mlvl_bboxes = torch.cat(mlvl_bboxes)\n        if rescale:\n            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n        mlvl_scores = torch.cat(mlvl_scores)\n        mlvl_labels = torch.cat(mlvl_labels)\n\n        if mlvl_score_factors is not None:\n            # TODO： Add sqrt operation in order to be consistent with\n            #  the paper.\n            mlvl_score_factors = torch.cat(mlvl_score_factors)\n            mlvl_scores = mlvl_scores * mlvl_score_factors\n\n        if with_nms:\n            if mlvl_bboxes.numel() == 0:\n                det_bboxes = torch.cat([mlvl_bboxes, mlvl_scores[:, None]], -1)\n                return det_bboxes, mlvl_labels\n\n            det_bboxes, keep_idxs = batched_nms(mlvl_bboxes, mlvl_scores,\n                                                mlvl_labels, cfg.nms)\n            det_bboxes = det_bboxes[:cfg.max_per_img]\n            det_labels = mlvl_labels[keep_idxs][:cfg.max_per_img]\n            return det_bboxes, det_labels\n        else:\n            return mlvl_bboxes, mlvl_scores, mlvl_labels\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels=None,\n                      gt_bboxes_ignore=None,\n                      proposal_cfg=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            x (list[Tensor]): Features from FPN.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            proposal_cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used\n\n        Returns:\n            tuple:\n                losses: (dict[str, Tensor]): A dictionary of loss components.\n                proposal_list (list[Tensor]): Proposals of each image.\n        \"\"\"\n        outs = self(x)\n        if gt_labels is None:\n            loss_inputs = outs + (gt_bboxes, img_metas)\n        else:\n            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)\n        losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n        if proposal_cfg is None:\n            return losses\n        else:\n            proposal_list = self.get_bboxes(\n                *outs, img_metas=img_metas, cfg=proposal_cfg)\n            return losses, proposal_list\n\n    def simple_test(self, feats, img_metas, rescale=False):\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n 
           img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is a 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5),\n                where 5 represents (tl_x, tl_y, br_x, br_y, score).\n                The second item is ``labels`` with shape (n, ).\n        \"\"\"\n        return self.simple_test_bboxes(feats, img_metas, rescale=rescale)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def onnx_export(self,\n                    cls_scores,\n                    bbox_preds,\n                    score_factors=None,\n                    img_metas=None,\n                    with_nms=True):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                with shape (N, num_points * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_points * 4, H, W).\n            score_factors (list[Tensor]): score_factors for each scale\n                level with shape (N, num_points * 1, H, W).\n                Default: None.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc. Default: None.\n            with_nms (bool): Whether to apply nms to the bboxes. Default: True.\n\n        Returns:\n            tuple[Tensor, Tensor] | list[tuple]: When `with_nms` is True,\n            it is a tuple[Tensor, Tensor]: the first tensor is bboxes with\n            shape [N, num_det, 5], where 5 is arranged as\n            (x1, y1, x2, y2, score), and the second tensor is class labels\n            of shape [N, num_det]. When `with_nms` is False, the first\n            tensor is bboxes with shape [N, num_det, 4] and the second\n            tensor is raw scores with shape [N, num_det, num_classes].\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        num_levels = len(cls_scores)\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n\n        mlvl_cls_scores = [cls_scores[i].detach() for i in range(num_levels)]\n        mlvl_bbox_preds = [bbox_preds[i].detach() for i in range(num_levels)]\n\n        assert len(\n            img_metas\n        ) == 1, 'Only support one input image when exporting to ONNX'\n        img_shape = img_metas[0]['img_shape_for_onnx']\n\n        cfg = self.test_cfg\n        assert len(cls_scores) == len(bbox_preds) == len(mlvl_priors)\n        device = cls_scores[0].device\n        batch_size = cls_scores[0].shape[0]\n        # convert to tensor to keep tracing\n        nms_pre_tensor = torch.tensor(\n            cfg.get('nms_pre', -1), device=device, dtype=torch.long)\n\n        # e.g. Retina, FreeAnchor, etc.\n        if score_factors is None:\n            with_score_factors = False\n            mlvl_score_factor = [None for _ in range(num_levels)]\n        else:\n            # e.g. 
FCOS, PAA, ATSS, etc.\n            with_score_factors = True\n            mlvl_score_factor = [\n                score_factors[i].detach() for i in range(num_levels)\n            ]\n            mlvl_score_factors = []\n\n        mlvl_batch_bboxes = []\n        mlvl_scores = []\n\n        for cls_score, bbox_pred, score_factors, priors in zip(\n                mlvl_cls_scores, mlvl_bbox_preds, mlvl_score_factor,\n                mlvl_priors):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            scores = cls_score.permute(0, 2, 3,\n                                       1).reshape(batch_size, -1,\n                                                  self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = scores.sigmoid()\n                nms_pre_score = scores\n            else:\n                scores = scores.softmax(-1)\n                nms_pre_score = scores\n\n            if with_score_factors:\n                score_factors = score_factors.permute(0, 2, 3, 1).reshape(\n                    batch_size, -1).sigmoid()\n            bbox_pred = bbox_pred.permute(0, 2, 3,\n                                          1).reshape(batch_size, -1, 4)\n            priors = priors.expand(batch_size, -1, priors.size(-1))\n            # Get top-k predictions\n            from mmdet.core.export import get_k_for_topk\n            nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1])\n            if nms_pre > 0:\n\n                if with_score_factors:\n                    nms_pre_score = (nms_pre_score * score_factors[..., None])\n                else:\n                    nms_pre_score = nms_pre_score\n\n                # Get maximum scores for foreground classes.\n                if self.use_sigmoid_cls:\n                    max_scores, _ = nms_pre_score.max(-1)\n                else:\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    max_scores, _ = nms_pre_score[..., :-1].max(-1)\n                _, topk_inds = max_scores.topk(nms_pre)\n\n                batch_inds = torch.arange(\n                    batch_size, device=bbox_pred.device).view(\n                        -1, 1).expand_as(topk_inds).long()\n                # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501\n                transformed_inds = bbox_pred.shape[1] * batch_inds + topk_inds\n                priors = priors.reshape(\n                    -1, priors.size(-1))[transformed_inds, :].reshape(\n                        batch_size, -1, priors.size(-1))\n                bbox_pred = bbox_pred.reshape(-1,\n                                              4)[transformed_inds, :].reshape(\n                                                  batch_size, -1, 4)\n                scores = scores.reshape(\n                    -1, self.cls_out_channels)[transformed_inds, :].reshape(\n                        batch_size, -1, self.cls_out_channels)\n                if with_score_factors:\n                    score_factors = score_factors.reshape(\n                        -1, 1)[transformed_inds].reshape(batch_size, -1)\n\n            bboxes = self.bbox_coder.decode(\n                priors, bbox_pred, max_shape=img_shape)\n\n            mlvl_batch_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            if with_score_factors:\n                mlvl_score_factors.append(score_factors)\n\n        batch_bboxes = 
torch.cat(mlvl_batch_bboxes, dim=1)\n        batch_scores = torch.cat(mlvl_scores, dim=1)\n        if with_score_factors:\n            batch_score_factors = torch.cat(mlvl_score_factors, dim=1)\n\n        # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment\n\n        from mmdet.core.export import add_dummy_nms_for_onnx\n\n        if not self.use_sigmoid_cls:\n            batch_scores = batch_scores[..., :self.num_classes]\n\n        if with_score_factors:\n            batch_scores = batch_scores * (batch_score_factors.unsqueeze(2))\n\n        if with_nms:\n            max_output_boxes_per_class = cfg.nms.get(\n                'max_output_boxes_per_class', 200)\n            iou_threshold = cfg.nms.get('iou_threshold', 0.5)\n            score_threshold = cfg.score_thr\n            nms_pre = cfg.get('deploy_nms_pre', -1)\n            return add_dummy_nms_for_onnx(batch_bboxes, batch_scores,\n                                          max_output_boxes_per_class,\n                                          iou_threshold, score_threshold,\n                                          nms_pre, cfg.max_per_img)\n        else:\n            return batch_bboxes, batch_scores\n"
  },
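`BaseDenseHead._get_bboxes_single` and `_bbox_post_process` above implement the per-image test-time pipeline: activate the scores, drop candidates below `score_thr`, keep at most `nms_pre` of them, decode the boxes, optionally fold in a score factor, run class-aware NMS, and cap the output at `max_per_img`. The sketch below is a rough stand-alone version of that flow under a few stated assumptions: it uses `torchvision.ops.batched_nms` in place of the mmcv helpers (`filter_scores_and_topk`, `mmcv.ops.batched_nms`), it takes already-decoded boxes, and the function name and default thresholds are illustrative only.

```python
import torch
from torchvision.ops import batched_nms  # stand-in for mmcv.ops.batched_nms


def postprocess_single_image(scores, bboxes, score_thr=0.05, nms_pre=1000,
                             iou_thr=0.6, max_per_img=100):
    # scores: (num_priors, num_classes) sigmoid scores; bboxes: (num_priors, 4) decoded boxes
    num_classes = scores.size(1)
    # flatten (prior, class) pairs and apply the score threshold
    flat_scores = scores.reshape(-1)
    keep = (flat_scores > score_thr).nonzero(as_tuple=False).squeeze(1)
    flat_scores = flat_scores[keep]
    # keep at most `nms_pre` candidates before NMS (mirrors cfg.nms_pre)
    if 0 < nms_pre < flat_scores.numel():
        flat_scores, topk_inds = flat_scores.topk(nms_pre)
        keep = keep[topk_inds]
    prior_inds = keep // num_classes  # which prior each candidate came from
    labels = keep % num_classes       # which class each candidate scores
    cand_bboxes = bboxes[prior_inds]
    # class-aware NMS, then cap the number of detections per image
    keep_inds = batched_nms(cand_bboxes, flat_scores, labels, iou_thr)[:max_per_img]
    dets = torch.cat([cand_bboxes[keep_inds], flat_scores[keep_inds, None]], dim=-1)
    return dets, labels[keep_inds]
```

Heads that predict a score factor (e.g. the centerness branch in FCOS or the IoU branch in ATSS) would multiply it into `flat_scores` before the NMS call, which is what `_bbox_post_process` does when `mlvl_score_factors` is not None.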
  {
    "path": "mmdet/models/dense_heads/base_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nfrom mmcv.runner import BaseModule\n\n\nclass BaseMaskHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for mask heads used in One-Stage Instance Segmentation.\"\"\"\n\n    def __init__(self, init_cfg):\n        super(BaseMaskHead, self).__init__(init_cfg)\n\n    @abstractmethod\n    def loss(self, **kwargs):\n        pass\n\n    @abstractmethod\n    def get_results(self, **kwargs):\n        \"\"\"Get precessed :obj:`InstanceData` of multiple images.\"\"\"\n        pass\n\n    def forward_train(self,\n                      x,\n                      gt_labels,\n                      gt_masks,\n                      img_metas,\n                      gt_bboxes=None,\n                      gt_bboxes_ignore=None,\n                      positive_infos=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            x (list[Tensor] | tuple[Tensor]): Features from FPN.\n                Each has a shape (B, C, H, W).\n            gt_labels (list[Tensor]): Ground truth labels of all images.\n                each has a shape (num_gts,).\n            gt_masks (list[Tensor]) : Masks for each bbox, has a shape\n                (num_gts, h , w).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes (list[Tensor]): Ground truth bboxes of the image,\n                each item has a shape (num_gts, 4).\n            gt_bboxes_ignore (list[Tensor], None): Ground truth bboxes to be\n                ignored, each item has a shape (num_ignored_gts, 4).\n            positive_infos (list[:obj:`InstanceData`], optional): Information\n                of positive samples. Used when the label assignment is\n                done outside the MaskHead, e.g., in BboxHead in\n                YOLACT or CondInst, etc. When the label assignment is done in\n                MaskHead, it would be None, like SOLO. All values\n                in it should have shape (num_positive_samples, *).\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        if positive_infos is None:\n            outs = self(x)\n        else:\n            outs = self(x, positive_infos)\n\n        assert isinstance(outs, tuple), 'Forward results should be a tuple, ' \\\n                                        'even if only one item is returned'\n        loss = self.loss(\n            *outs,\n            gt_labels=gt_labels,\n            gt_masks=gt_masks,\n            img_metas=img_metas,\n            gt_bboxes=gt_bboxes,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            positive_infos=positive_infos,\n            **kwargs)\n        return loss\n\n    def simple_test(self,\n                    feats,\n                    img_metas,\n                    rescale=False,\n                    instances_list=None,\n                    **kwargs):\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n            instances_list (list[obj:`InstanceData`], optional): Detection\n                results of each image after the post process. 
Only exists\n                if there is a `bbox_head`, like `YOLACT`, `CondInst`, etc.\n\n        Returns:\n            list[:obj:`InstanceData`]: Instance segmentation \\\n                results of each image after the post process. \\\n                Each item usually contains the following keys. \\\n\n                - scores (Tensor): Classification scores, has a shape\n                  (num_instances,)\n                - labels (Tensor): Has a shape (num_instances,).\n                - masks (Tensor): Processed mask results, has a\n                  shape (num_instances, h, w).\n        \"\"\"\n        if instances_list is None:\n            outs = self(feats)\n        else:\n            outs = self(feats, instances_list=instances_list)\n        mask_inputs = outs + (img_metas, )\n        results_list = self.get_results(\n            *mask_inputs,\n            rescale=rescale,\n            instances_list=instances_list,\n            **kwargs)\n        return results_list\n\n    def onnx_export(self, img, img_metas):\n        raise NotImplementedError(f'{self.__class__.__name__} does '\n                                  f'not support ONNX EXPORT')\n"
  },
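`BaseMaskHead` above is essentially a contract: `forward` must return a tuple, and subclasses provide `loss` and `get_results`, which `forward_train` and `simple_test` then call with the unpacked forward outputs plus the ground-truth / meta keyword arguments. A schematic, deliberately toy subclass showing that call pattern is sketched below; it is not a real mmdet head, does not register with the `HEADS` registry, and its loss is a placeholder.

```python
import torch.nn as nn


class ToyMaskHead(nn.Module):
    # Stand-in for BaseMaskHead so the sketch stays self-contained.

    def __init__(self, in_channels=256, num_classes=80):
        super().__init__()
        self.mask_pred = nn.Conv2d(in_channels, num_classes, kernel_size=1)

    def forward(self, feats, positive_infos=None):
        # use only the highest-resolution FPN level in this toy example;
        # returning a tuple matches the assertion in forward_train
        return (self.mask_pred(feats[0]), )

    def loss(self, mask_preds, gt_labels=None, gt_masks=None, img_metas=None,
             **kwargs):
        # placeholder loss so the call signature is visible; a real head
        # would compare mask_preds against gt_masks here
        return dict(loss_mask=mask_preds.sigmoid().mean())

    def get_results(self, mask_preds, img_metas, rescale=False, **kwargs):
        # binarize the per-class logits per image; a real head would also
        # return scores and labels, typically wrapped in InstanceData
        return [(m.sigmoid() > 0.5) for m in mask_preds]
```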
  {
    "path": "mmdet/models/dense_heads/cascade_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom __future__ import division\nimport copy\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv import ConfigDict\nfrom mmcv.ops import DeformConv2d, batched_nms\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom mmdet.core import (RegionAssigner, build_assigner, build_sampler,\n                        images_to_levels, multi_apply)\nfrom mmdet.core.utils import select_single_mlvl\nfrom ..builder import HEADS, build_head\nfrom .base_dense_head import BaseDenseHead\nfrom .rpn_head import RPNHead\n\n\nclass AdaptiveConv(BaseModule):\n    \"\"\"AdaptiveConv used to adapt the sampling location with the anchors.\n\n    Args:\n        in_channels (int): Number of channels in the input image\n        out_channels (int): Number of channels produced by the convolution\n        kernel_size (int or tuple): Size of the conv kernel. Default: 3\n        stride (int or tuple, optional): Stride of the convolution. Default: 1\n        padding (int or tuple, optional): Zero-padding added to both sides of\n            the input. Default: 1\n        dilation (int or tuple, optional): Spacing between kernel elements.\n            Default: 3\n        groups (int, optional): Number of blocked connections from input\n            channels to output channels. Default: 1\n        bias (bool, optional): If set True, adds a learnable bias to the\n            output. Default: False.\n        type (str, optional): Type of adaptive conv, can be either 'offset'\n            (arbitrary anchors) or 'dilation' (uniform anchor).\n            Default: 'dilation'.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=3,\n                 stride=1,\n                 padding=1,\n                 dilation=3,\n                 groups=1,\n                 bias=False,\n                 type='dilation',\n                 init_cfg=dict(\n                     type='Normal', std=0.01, override=dict(name='conv'))):\n        super(AdaptiveConv, self).__init__(init_cfg)\n        assert type in ['offset', 'dilation']\n        self.adapt_type = type\n\n        assert kernel_size == 3, 'Adaptive conv only supports kernels 3'\n        if self.adapt_type == 'offset':\n            assert stride == 1 and padding == 1 and groups == 1, \\\n                'Adaptive conv offset mode only supports padding: {1}, ' \\\n                f'stride: {1}, groups: {1}'\n            self.conv = DeformConv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=padding,\n                stride=stride,\n                groups=groups,\n                bias=bias)\n        else:\n            self.conv = nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size,\n                padding=dilation,\n                dilation=dilation)\n\n    def forward(self, x, offset):\n        \"\"\"Forward function.\"\"\"\n        if self.adapt_type == 'offset':\n            N, _, H, W = x.shape\n            assert offset is not None\n            assert H * W == offset.shape[1]\n            # reshape [N, NA, 18] to (N, 18, H, W)\n            offset = offset.permute(0, 2, 1).reshape(N, -1, H, W)\n            offset = offset.contiguous()\n            x = self.conv(x, offset)\n        else:\n            assert offset is None\n            x = 
self.conv(x)\n        return x\n\n\n@HEADS.register_module()\nclass StageCascadeRPNHead(RPNHead):\n    \"\"\"Stage of CascadeRPNHead.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        anchor_generator (dict): anchor generator config.\n        adapt_cfg (dict): adaptation config.\n        bridged_feature (bool, optional): whether update rpn feature.\n            Default: False.\n        with_cls (bool, optional): whether use classification branch.\n            Default: True.\n        sampling (bool, optional): whether use sampling. Default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     scales=[8],\n                     ratios=[1.0],\n                     strides=[4, 8, 16, 32, 64]),\n                 adapt_cfg=dict(type='dilation', dilation=3),\n                 bridged_feature=False,\n                 with_cls=True,\n                 sampling=True,\n                 init_cfg=None,\n                 **kwargs):\n        self.with_cls = with_cls\n        self.anchor_strides = anchor_generator['strides']\n        self.anchor_scales = anchor_generator['scales']\n        self.bridged_feature = bridged_feature\n        self.adapt_cfg = adapt_cfg\n        super(StageCascadeRPNHead, self).__init__(\n            in_channels,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        # override sampling and sampler\n        self.sampling = sampling\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # use PseudoSampler when sampling is False\n            if self.sampling and hasattr(self.train_cfg, 'sampler'):\n                sampler_cfg = self.train_cfg.sampler\n            else:\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n\n        if init_cfg is None:\n            self.init_cfg = dict(\n                type='Normal', std=0.01, override=[dict(name='rpn_reg')])\n            if self.with_cls:\n                self.init_cfg['override'].append(dict(name='rpn_cls'))\n\n    def _init_layers(self):\n        \"\"\"Init layers of a CascadeRPN stage.\"\"\"\n        self.rpn_conv = AdaptiveConv(self.in_channels, self.feat_channels,\n                                     **self.adapt_cfg)\n        if self.with_cls:\n            self.rpn_cls = nn.Conv2d(self.feat_channels,\n                                     self.num_anchors * self.cls_out_channels,\n                                     1)\n        self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_anchors * 4, 1)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward_single(self, x, offset):\n        \"\"\"Forward function of single scale.\"\"\"\n        bridged_x = x\n        x = self.relu(self.rpn_conv(x, offset))\n        if self.bridged_feature:\n            bridged_x = x  # update feature\n        cls_score = self.rpn_cls(x) if self.with_cls else None\n        bbox_pred = self.rpn_reg(x)\n        return bridged_x, cls_score, bbox_pred\n\n    def forward(self, feats, offset_list=None):\n        \"\"\"Forward function.\"\"\"\n        if offset_list is None:\n            offset_list = [None for _ in range(len(feats))]\n        return multi_apply(self.forward_single, 
feats, offset_list)\n\n    def _region_targets_single(self,\n                               anchors,\n                               valid_flags,\n                               gt_bboxes,\n                               gt_bboxes_ignore,\n                               gt_labels,\n                               img_meta,\n                               featmap_sizes,\n                               label_channels=1):\n        \"\"\"Get anchor targets based on region for single level.\"\"\"\n        assign_result = self.assigner.assign(\n            anchors,\n            valid_flags,\n            gt_bboxes,\n            img_meta,\n            featmap_sizes,\n            self.anchor_scales[0],\n            self.anchor_strides,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            gt_labels=None,\n            allowed_border=self.train_cfg.allowed_border)\n        flat_anchors = torch.cat(anchors)\n        sampling_result = self.sampler.sample(assign_result, flat_anchors,\n                                              gt_bboxes)\n\n        num_anchors = flat_anchors.shape[0]\n        bbox_targets = torch.zeros_like(flat_anchors)\n        bbox_weights = torch.zeros_like(flat_anchors)\n        labels = flat_anchors.new_zeros(num_anchors, dtype=torch.long)\n        label_weights = flat_anchors.new_zeros(num_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                labels[pos_inds] = 1\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds)\n\n    def region_targets(self,\n                       anchor_list,\n                       valid_flag_list,\n                       gt_bboxes_list,\n                       img_metas,\n                       featmap_sizes,\n                       gt_bboxes_ignore_list=None,\n                       gt_labels_list=None,\n                       label_channels=1,\n                       unmap_outputs=True):\n        \"\"\"See :func:`StageCascadeRPNHead.get_targets`.\"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        (all_labels, all_label_weights, all_bbox_targets, all_bbox_weights,\n         pos_inds_list, neg_inds_list) = multi_apply(\n             self._region_targets_single,\n             
anchor_list,\n             valid_flag_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             featmap_sizes=featmap_sizes,\n             label_channels=label_channels)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. multiple levels\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        return (labels_list, label_weights_list, bbox_targets_list,\n                bbox_weights_list, num_total_pos, num_total_neg)\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes,\n                    img_metas,\n                    featmap_sizes,\n                    gt_bboxes_ignore=None,\n                    label_channels=1):\n        \"\"\"Compute regression and classification targets for anchors.\n\n        Args:\n            anchor_list (list[list]): Multi level anchors of each image.\n            valid_flag_list (list[list]): Multi level valid flags of each\n                image.\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            featmap_sizes (list[Tensor]): Feature mapsize each level\n            gt_bboxes_ignore (list[Tensor]): Ignore bboxes of each images\n            label_channels (int): Channel of label.\n\n        Returns:\n            cls_reg_targets (tuple)\n        \"\"\"\n        if isinstance(self.assigner, RegionAssigner):\n            cls_reg_targets = self.region_targets(\n                anchor_list,\n                valid_flag_list,\n                gt_bboxes,\n                img_metas,\n                featmap_sizes,\n                gt_bboxes_ignore_list=gt_bboxes_ignore,\n                label_channels=label_channels)\n        else:\n            cls_reg_targets = super(StageCascadeRPNHead, self).get_targets(\n                anchor_list,\n                valid_flag_list,\n                gt_bboxes,\n                img_metas,\n                gt_bboxes_ignore_list=gt_bboxes_ignore,\n                label_channels=label_channels)\n        return cls_reg_targets\n\n    def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):\n        \"\"\" Get offset for deformable conv based on anchor shape\n        NOTE: currently support deformable kernel_size=3 and dilation=1\n\n        Args:\n            anchor_list (list[list[tensor])): [NI, NLVL, NA, 4] list of\n                multi-level anchors\n            anchor_strides (list[int]): anchor stride of each level\n\n        Returns:\n            offset_list (list[tensor]): [NLVL, NA, 2, 18]: offset of DeformConv\n                kernel.\n        \"\"\"\n\n        def _shape_offset(anchors, stride, ks=3, dilation=1):\n            # currently support 
kernel_size=3 and dilation=1\n            assert ks == 3 and dilation == 1\n            pad = (ks - 1) // 2\n            idx = torch.arange(-pad, pad + 1, dtype=dtype, device=device)\n            yy, xx = torch.meshgrid(idx, idx)  # return order matters\n            xx = xx.reshape(-1)\n            yy = yy.reshape(-1)\n            w = (anchors[:, 2] - anchors[:, 0]) / stride\n            h = (anchors[:, 3] - anchors[:, 1]) / stride\n            w = w / (ks - 1) - dilation\n            h = h / (ks - 1) - dilation\n            offset_x = w[:, None] * xx  # (NA, ks**2)\n            offset_y = h[:, None] * yy  # (NA, ks**2)\n            return offset_x, offset_y\n\n        def _ctr_offset(anchors, stride, featmap_size):\n            feat_h, feat_w = featmap_size\n            assert len(anchors) == feat_h * feat_w\n\n            x = (anchors[:, 0] + anchors[:, 2]) * 0.5\n            y = (anchors[:, 1] + anchors[:, 3]) * 0.5\n            # compute centers on feature map\n            x = x / stride\n            y = y / stride\n            # compute predefine centers\n            xx = torch.arange(0, feat_w, device=anchors.device)\n            yy = torch.arange(0, feat_h, device=anchors.device)\n            yy, xx = torch.meshgrid(yy, xx)\n            xx = xx.reshape(-1).type_as(x)\n            yy = yy.reshape(-1).type_as(y)\n\n            offset_x = x - xx  # (NA, )\n            offset_y = y - yy  # (NA, )\n            return offset_x, offset_y\n\n        num_imgs = len(anchor_list)\n        num_lvls = len(anchor_list[0])\n        dtype = anchor_list[0][0].dtype\n        device = anchor_list[0][0].device\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        offset_list = []\n        for i in range(num_imgs):\n            mlvl_offset = []\n            for lvl in range(num_lvls):\n                c_offset_x, c_offset_y = _ctr_offset(anchor_list[i][lvl],\n                                                     anchor_strides[lvl],\n                                                     featmap_sizes[lvl])\n                s_offset_x, s_offset_y = _shape_offset(anchor_list[i][lvl],\n                                                       anchor_strides[lvl])\n\n                # offset = ctr_offset + shape_offset\n                offset_x = s_offset_x + c_offset_x[:, None]\n                offset_y = s_offset_y + c_offset_y[:, None]\n\n                # offset order (y0, x0, y1, x2, .., y8, x8, y9, x9)\n                offset = torch.stack([offset_y, offset_x], dim=-1)\n                offset = offset.reshape(offset.size(0), -1)  # [NA, 2*ks**2]\n                mlvl_offset.append(offset)\n            offset_list.append(torch.cat(mlvl_offset))  # [totalNA, 2*ks**2]\n        offset_list = images_to_levels(offset_list, num_level_anchors)\n        return offset_list\n\n    def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,\n                    bbox_targets, bbox_weights, num_total_samples):\n        \"\"\"Loss function on single scale.\"\"\"\n        # classification loss\n        if self.with_cls:\n            labels = labels.reshape(-1)\n            label_weights = label_weights.reshape(-1)\n            cls_score = cls_score.permute(0, 2, 3,\n                                          1).reshape(-1, self.cls_out_channels)\n            loss_cls = self.loss_cls(\n                cls_score, labels, label_weights, avg_factor=num_total_samples)\n        # regression loss\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        bbox_weights = 
bbox_weights.reshape(-1, 4)\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            anchors = anchors.reshape(-1, 4)\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n        loss_reg = self.loss_bbox(\n            bbox_pred,\n            bbox_targets,\n            bbox_weights,\n            avg_factor=num_total_samples)\n        if self.with_cls:\n            return loss_cls, loss_reg\n        return None, loss_reg\n\n    def loss(self,\n             anchor_list,\n             valid_flag_list,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            anchor_list (list[list]): Multi level anchors of each image.\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss. Default: None\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in bbox_preds]\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            featmap_sizes,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        if self.sampling:\n            num_total_samples = num_total_pos + num_total_neg\n        else:\n            # 200 is hard-coded average factor,\n            # which follows guided anchoring.\n            num_total_samples = sum([label.numel()\n                                     for label in labels_list]) / 200.0\n\n        # change per image, per level anchor_list to per_level, per_image\n        mlvl_anchor_list = list(zip(*anchor_list))\n        # concat mlvl_anchor_list\n        mlvl_anchor_list = [\n            torch.cat(anchors, dim=0) for anchors in mlvl_anchor_list\n        ]\n\n        losses = multi_apply(\n            self.loss_single,\n            cls_scores,\n            bbox_preds,\n            mlvl_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            num_total_samples=num_total_samples)\n        if self.with_cls:\n            return dict(loss_rpn_cls=losses[0], loss_rpn_reg=losses[1])\n        return 
dict(loss_rpn_reg=losses[1])\n\n    def get_bboxes(self,\n                   anchor_list,\n                   cls_scores,\n                   bbox_preds,\n                   img_metas,\n                   cfg,\n                   rescale=False):\n        \"\"\"Get proposal predict.\n\n        Args:\n            anchor_list (list[list]): Multi level anchors of each image.\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            img_metas (list[dict], Optional): Image meta info. Default None.\n            cfg (mmcv.Config, Optional): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n\n        Returns:\n            Tensor: Labeled boxes in shape (n, 5), where the first 4 columns\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_score_list = select_single_mlvl(cls_scores, img_id)\n            bbox_pred_list = select_single_mlvl(bbox_preds, img_id)\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,\n                                                anchor_list[img_id], img_shape,\n                                                scale_factor, cfg, rescale)\n            result_list.append(proposals)\n        return result_list\n\n    def _get_bboxes_single(self,\n                           cls_scores,\n                           bbox_preds,\n                           mlvl_anchors,\n                           img_shape,\n                           scale_factor,\n                           cfg,\n                           rescale=False):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_anchors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has\n                shape (num_anchors * 4, H, W).\n            mlvl_anchors (list[Tensor]): Box reference from all scale\n                levels of a single image, each item has shape\n                (num_total_anchors, 4).\n            img_shape (tuple[int]): Shape of the input image,\n                (height, width, 3).\n            scale_factor (ndarray): Scale factor of the image arange as\n                (w_scale, h_scale, w_scale, h_scale).\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default False.\n\n        Returns:\n            Tensor: Labeled boxes in shape (n, 5), where the first 4 columns\n                are bounding box positions 
(tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1.\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        # bboxes from different level should be independent during NMS,\n        # level_ids are used as labels for batched NMS to separate them\n        level_ids = []\n        mlvl_scores = []\n        mlvl_bbox_preds = []\n        mlvl_valid_anchors = []\n        nms_pre = cfg.get('nms_pre', -1)\n        for idx in range(len(cls_scores)):\n            rpn_cls_score = cls_scores[idx]\n            rpn_bbox_pred = bbox_preds[idx]\n            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]\n            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)\n            if self.use_sigmoid_cls:\n                rpn_cls_score = rpn_cls_score.reshape(-1)\n                scores = rpn_cls_score.sigmoid()\n            else:\n                rpn_cls_score = rpn_cls_score.reshape(-1, 2)\n                # We set FG labels to [0, num_class-1] and BG label to\n                # num_class in RPN head since mmdet v2.5, which is unified to\n                # be consistent with other head since mmdet v2.0. In mmdet v2.0\n                # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.\n                scores = rpn_cls_score.softmax(dim=1)[:, 0]\n            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            anchors = mlvl_anchors[idx]\n\n            if 0 < nms_pre < scores.shape[0]:\n                # sort is faster than topk\n                # _, topk_inds = scores.topk(cfg.nms_pre)\n                ranked_scores, rank_inds = scores.sort(descending=True)\n                topk_inds = rank_inds[:nms_pre]\n                scores = ranked_scores[:nms_pre]\n                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]\n                anchors = anchors[topk_inds, :]\n            mlvl_scores.append(scores)\n            mlvl_bbox_preds.append(rpn_bbox_pred)\n            mlvl_valid_anchors.append(anchors)\n            level_ids.append(\n                scores.new_full((scores.size(0), ), idx, dtype=torch.long))\n\n        scores = torch.cat(mlvl_scores)\n        anchors = torch.cat(mlvl_valid_anchors)\n        rpn_bbox_pred = torch.cat(mlvl_bbox_preds)\n        proposals = self.bbox_coder.decode(\n            anchors, rpn_bbox_pred, max_shape=img_shape)\n        ids = torch.cat(level_ids)\n\n        if cfg.min_bbox_size >= 0:\n            w = proposals[:, 2] - proposals[:, 0]\n            h = proposals[:, 3] - proposals[:, 1]\n            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n            if not valid_mask.all():\n                proposals = proposals[valid_mask]\n                scores = scores[valid_mask]\n                ids = ids[valid_mask]\n\n        # deprecate arguments warning\n        if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:\n            warnings.warn(\n                'In rpn_proposal or test_cfg, '\n                'nms_thr has been moved to a dict named nms as '\n                'iou_threshold, max_num has been renamed as max_per_img, '\n                'name of original arguments and the way to specify '\n                'iou_threshold of NMS will be deprecated.')\n        if 'nms' not in cfg:\n            cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))\n        if 'max_num' in cfg:\n            if 'max_per_img' in cfg:\n                assert cfg.max_num == cfg.max_per_img, f'You ' \\\n                
    f'set max_num and ' \\\n                    f'max_per_img at the same time, but get {cfg.max_num} ' \\\n                    f'and {cfg.max_per_img} respectively' \\\n                    'Please delete max_num which will be deprecated.'\n            else:\n                cfg.max_per_img = cfg.max_num\n        if 'nms_thr' in cfg:\n            assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set' \\\n                f' iou_threshold in nms and ' \\\n                f'nms_thr at the same time, but get' \\\n                f' {cfg.nms.iou_threshold} and {cfg.nms_thr}' \\\n                f' respectively. Please delete the nms_thr ' \\\n                f'which will be deprecated.'\n\n        if proposals.numel() > 0:\n            dets, _ = batched_nms(proposals, scores, ids, cfg.nms)\n        else:\n            return proposals.new_zeros(0, 5)\n\n        return dets[:cfg.max_per_img]\n\n    def refine_bboxes(self, anchor_list, bbox_preds, img_metas):\n        \"\"\"Refine bboxes through stages.\"\"\"\n        num_levels = len(bbox_preds)\n        new_anchor_list = []\n        for img_id in range(len(img_metas)):\n            mlvl_anchors = []\n            for i in range(num_levels):\n                bbox_pred = bbox_preds[i][img_id].detach()\n                bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n                img_shape = img_metas[img_id]['img_shape']\n                bboxes = self.bbox_coder.decode(anchor_list[img_id][i],\n                                                bbox_pred, img_shape)\n                mlvl_anchors.append(bboxes)\n            new_anchor_list.append(mlvl_anchors)\n        return new_anchor_list\n\n\n@HEADS.register_module()\nclass CascadeRPNHead(BaseDenseHead):\n    \"\"\"The CascadeRPNHead will predict more accurate region proposals, which is\n    required for two-stage detectors (such as Fast/Faster R-CNN). CascadeRPN\n    consists of a sequence of RPNStage to progressively improve the accuracy of\n    the detected proposals.\n\n    More details can be found in ``https://arxiv.org/abs/1909.06720``.\n\n    Args:\n        num_stages (int): number of CascadeRPN stages.\n        stages (list[dict]): list of configs to build the stages.\n        train_cfg (list[dict]): list of configs at training time each stage.\n        test_cfg (dict): config at testing time.\n    \"\"\"\n\n    def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=None):\n        super(CascadeRPNHead, self).__init__(init_cfg)\n        assert num_stages == len(stages)\n        self.num_stages = num_stages\n        # Be careful! 
Pretrained weights cannot be loaded when use\n        # nn.ModuleList\n        self.stages = ModuleList()\n        for i in range(len(stages)):\n            train_cfg_i = train_cfg[i] if train_cfg is not None else None\n            stages[i].update(train_cfg=train_cfg_i)\n            stages[i].update(test_cfg=test_cfg)\n            self.stages.append(build_head(stages[i]))\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def loss(self):\n        \"\"\"loss() is implemented in StageCascadeRPNHead.\"\"\"\n        pass\n\n    def get_bboxes(self):\n        \"\"\"get_bboxes() is implemented in StageCascadeRPNHead.\"\"\"\n        pass\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels=None,\n                      gt_bboxes_ignore=None,\n                      proposal_cfg=None):\n        \"\"\"Forward train function.\"\"\"\n        assert gt_labels is None, 'RPN does not require gt_labels'\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, valid_flag_list = self.stages[0].get_anchors(\n            featmap_sizes, img_metas, device=device)\n\n        losses = dict()\n\n        for i in range(self.num_stages):\n            stage = self.stages[i]\n\n            if stage.adapt_cfg['type'] == 'offset':\n                offset_list = stage.anchor_offset(anchor_list,\n                                                  stage.anchor_strides,\n                                                  featmap_sizes)\n            else:\n                offset_list = None\n            x, cls_score, bbox_pred = stage(x, offset_list)\n            rpn_loss_inputs = (anchor_list, valid_flag_list, cls_score,\n                               bbox_pred, gt_bboxes, img_metas)\n            stage_loss = stage.loss(*rpn_loss_inputs)\n            for name, value in stage_loss.items():\n                losses['s{}.{}'.format(i, name)] = value\n\n            # refine boxes\n            if i < self.num_stages - 1:\n                anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,\n                                                  img_metas)\n        if proposal_cfg is None:\n            return losses\n        else:\n            proposal_list = self.stages[-1].get_bboxes(anchor_list, cls_score,\n                                                       bbox_pred, img_metas,\n                                                       self.test_cfg)\n            return losses, proposal_list\n\n    def simple_test_rpn(self, x, img_metas):\n        \"\"\"Simple forward test function.\"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in x]\n        device = x[0].device\n        anchor_list, _ = self.stages[0].get_anchors(\n            featmap_sizes, img_metas, device=device)\n\n        for i in range(self.num_stages):\n            stage = self.stages[i]\n            if stage.adapt_cfg['type'] == 'offset':\n                offset_list = stage.anchor_offset(anchor_list,\n                                                  stage.anchor_strides,\n                                                  featmap_sizes)\n            else:\n                offset_list = None\n            x, cls_score, bbox_pred = stage(x, offset_list)\n            if i < self.num_stages - 1:\n                anchor_list = stage.refine_bboxes(anchor_list, bbox_pred,\n                                                  img_metas)\n\n        proposal_list = 
self.stages[-1].get_bboxes(anchor_list, cls_score,\n                                                   bbox_pred, img_metas,\n                                                   self.test_cfg)\n        return proposal_list\n\n    def aug_test_rpn(self, x, img_metas):\n        \"\"\"Augmented forward test function.\"\"\"\n        raise NotImplementedError(\n            'CascadeRPNHead does not support test-time augmentation')\n"
  },
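The deprecation handling in get_bboxes above rewrites old-style RPN test configs in place (nms_thr becomes nms.iou_threshold, max_num becomes max_per_img). A minimal sketch of that normalization under the assumption that mmcv's ConfigDict is importable from mmcv.utils; the key values are illustrative, not taken from the file.

from mmcv.utils import ConfigDict

# Old-style cfg using the deprecated keys handled in get_bboxes above.
old_cfg = ConfigDict(dict(nms_thr=0.7, max_num=1000, min_bbox_size=0))
if 'nms' not in old_cfg:
    # nms_thr is folded into a dict-style nms config.
    old_cfg.nms = ConfigDict(dict(type='nms', iou_threshold=old_cfg.nms_thr))
if 'max_num' in old_cfg and 'max_per_img' not in old_cfg:
    # max_num is carried over under its new name.
    old_cfg.max_per_img = old_cfg.max_num
assert old_cfg.nms.iou_threshold == 0.7 and old_cfg.max_per_img == 1000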
  {
    "path": "mmdet/models/dense_heads/centernet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import bias_init_with_prob, normal_init\nfrom mmcv.ops import batched_nms\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply\nfrom mmdet.models import HEADS, build_loss\nfrom mmdet.models.utils import gaussian_radius, gen_gaussian_target\nfrom ..utils.gaussian_target import (get_local_maximum, get_topk_from_heatmap,\n                                     transpose_and_gather_feat)\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\n@HEADS.register_module()\nclass CenterNetHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"Objects as Points Head. CenterHead use center_point to indicate object's\n    position. Paper link <https://arxiv.org/abs/1904.07850>\n\n    Args:\n        in_channel (int): Number of channel in the input feature map.\n        feat_channel (int): Number of channel in the intermediate feature map.\n        num_classes (int): Number of categories excluding the background\n            category.\n        loss_center_heatmap (dict | None): Config of center heatmap loss.\n            Default: GaussianFocalLoss.\n        loss_wh (dict | None): Config of wh loss. Default: L1Loss.\n        loss_offset (dict | None): Config of offset loss. Default: L1Loss.\n        train_cfg (dict | None): Training config. Useless in CenterNet,\n            but we keep this variable for SingleStageDetector. Default: None.\n        test_cfg (dict | None): Testing config of CenterNet. Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channel,\n                 feat_channel,\n                 num_classes,\n                 loss_center_heatmap=dict(\n                     type='GaussianFocalLoss', loss_weight=1.0),\n                 loss_wh=dict(type='L1Loss', loss_weight=0.1),\n                 loss_offset=dict(type='L1Loss', loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None):\n        super(CenterNetHead, self).__init__(init_cfg)\n        self.num_classes = num_classes\n        self.heatmap_head = self._build_head(in_channel, feat_channel,\n                                             num_classes)\n        self.wh_head = self._build_head(in_channel, feat_channel, 2)\n        self.offset_head = self._build_head(in_channel, feat_channel, 2)\n\n        self.loss_center_heatmap = build_loss(loss_center_heatmap)\n        self.loss_wh = build_loss(loss_wh)\n        self.loss_offset = build_loss(loss_offset)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.fp16_enabled = False\n\n    def _build_head(self, in_channel, feat_channel, out_channel):\n        \"\"\"Build head for each branch.\"\"\"\n        layer = nn.Sequential(\n            nn.Conv2d(in_channel, feat_channel, kernel_size=3, padding=1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(feat_channel, out_channel, kernel_size=1))\n        return layer\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\"\"\"\n        bias_init = bias_init_with_prob(0.1)\n        self.heatmap_head[-1].bias.data.fill_(bias_init)\n        for head in [self.wh_head, self.offset_head]:\n            for m in head.modules():\n                if isinstance(m, nn.Conv2d):\n                    normal_init(m, std=0.001)\n\n    def 
forward(self, feats):\n        \"\"\"Forward features. Notice CenterNet head does not use FPN.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            center_heatmap_preds (List[Tensor]): center predict heatmaps for\n                all levels, the channels number is num_classes.\n            wh_preds (List[Tensor]): wh predicts for all levels, the channels\n                number is 2.\n            offset_preds (List[Tensor]): offset predicts for all levels, the\n               channels number is 2.\n        \"\"\"\n        return multi_apply(self.forward_single, feats)\n\n    def forward_single(self, feat):\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            feat (Tensor): Feature of a single level.\n\n        Returns:\n            center_heatmap_pred (Tensor): center predict heatmaps, the\n               channels number is num_classes.\n            wh_pred (Tensor): wh predicts, the channels number is 2.\n            offset_pred (Tensor): offset predicts, the channels number is 2.\n        \"\"\"\n        center_heatmap_pred = self.heatmap_head(feat).sigmoid()\n        wh_pred = self.wh_head(feat)\n        offset_pred = self.offset_head(feat)\n        return center_heatmap_pred, wh_pred, offset_pred\n\n    @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds'))\n    def loss(self,\n             center_heatmap_preds,\n             wh_preds,\n             offset_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            center_heatmap_preds (list[Tensor]): center predict heatmaps for\n               all levels with shape (B, num_classes, H, W).\n            wh_preds (list[Tensor]): wh predicts for all levels with\n               shape (B, 2, H, W).\n            offset_preds (list[Tensor]): offset predicts for all levels\n               with shape (B, 2, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss. 
Default: None\n\n        Returns:\n            dict[str, Tensor]: which has components below:\n                - loss_center_heatmap (Tensor): loss of center heatmap.\n                - loss_wh (Tensor): loss of hw heatmap\n                - loss_offset (Tensor): loss of offset heatmap.\n        \"\"\"\n        assert len(center_heatmap_preds) == len(wh_preds) == len(\n            offset_preds) == 1\n        center_heatmap_pred = center_heatmap_preds[0]\n        wh_pred = wh_preds[0]\n        offset_pred = offset_preds[0]\n\n        target_result, avg_factor = self.get_targets(gt_bboxes, gt_labels,\n                                                     center_heatmap_pred.shape,\n                                                     img_metas[0]['pad_shape'])\n\n        center_heatmap_target = target_result['center_heatmap_target']\n        wh_target = target_result['wh_target']\n        offset_target = target_result['offset_target']\n        wh_offset_target_weight = target_result['wh_offset_target_weight']\n\n        # Since the channel of wh_target and offset_target is 2, the avg_factor\n        # of loss_center_heatmap is always 1/2 of loss_wh and loss_offset.\n        loss_center_heatmap = self.loss_center_heatmap(\n            center_heatmap_pred, center_heatmap_target, avg_factor=avg_factor)\n        loss_wh = self.loss_wh(\n            wh_pred,\n            wh_target,\n            wh_offset_target_weight,\n            avg_factor=avg_factor * 2)\n        loss_offset = self.loss_offset(\n            offset_pred,\n            offset_target,\n            wh_offset_target_weight,\n            avg_factor=avg_factor * 2)\n        return dict(\n            loss_center_heatmap=loss_center_heatmap,\n            loss_wh=loss_wh,\n            loss_offset=loss_offset)\n\n    def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape):\n        \"\"\"Compute regression and classification targets in multiple images.\n\n        Args:\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box.\n            feat_shape (list[int]): feature map shape with value [B, _, H, W]\n            img_shape (list[int]): image shape in [h, w] format.\n\n        Returns:\n            tuple[dict,float]: The float value is mean avg_factor, the dict has\n               components below:\n               - center_heatmap_target (Tensor): targets of center heatmap, \\\n                   shape (B, num_classes, H, W).\n               - wh_target (Tensor): targets of wh predict, shape \\\n                   (B, 2, H, W).\n               - offset_target (Tensor): targets of offset predict, shape \\\n                   (B, 2, H, W).\n               - wh_offset_target_weight (Tensor): weights of wh and offset \\\n                   predict, shape (B, 2, H, W).\n        \"\"\"\n        img_h, img_w = img_shape[:2]\n        bs, _, feat_h, feat_w = feat_shape\n\n        width_ratio = float(feat_w / img_w)\n        height_ratio = float(feat_h / img_h)\n\n        center_heatmap_target = gt_bboxes[-1].new_zeros(\n            [bs, self.num_classes, feat_h, feat_w])\n        wh_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])\n        offset_target = gt_bboxes[-1].new_zeros([bs, 2, feat_h, feat_w])\n        wh_offset_target_weight = gt_bboxes[-1].new_zeros(\n            [bs, 2, feat_h, feat_w])\n\n        for batch_id in range(bs):\n            
gt_bbox = gt_bboxes[batch_id]\n            gt_label = gt_labels[batch_id]\n            center_x = (gt_bbox[:, [0]] + gt_bbox[:, [2]]) * width_ratio / 2\n            center_y = (gt_bbox[:, [1]] + gt_bbox[:, [3]]) * height_ratio / 2\n            gt_centers = torch.cat((center_x, center_y), dim=1)\n\n            for j, ct in enumerate(gt_centers):\n                ctx_int, cty_int = ct.int()\n                ctx, cty = ct\n                scale_box_h = (gt_bbox[j][3] - gt_bbox[j][1]) * height_ratio\n                scale_box_w = (gt_bbox[j][2] - gt_bbox[j][0]) * width_ratio\n                radius = gaussian_radius([scale_box_h, scale_box_w],\n                                         min_overlap=0.3)\n                radius = max(0, int(radius))\n                ind = gt_label[j]\n                gen_gaussian_target(center_heatmap_target[batch_id, ind],\n                                    [ctx_int, cty_int], radius)\n\n                wh_target[batch_id, 0, cty_int, ctx_int] = scale_box_w\n                wh_target[batch_id, 1, cty_int, ctx_int] = scale_box_h\n\n                offset_target[batch_id, 0, cty_int, ctx_int] = ctx - ctx_int\n                offset_target[batch_id, 1, cty_int, ctx_int] = cty - cty_int\n\n                wh_offset_target_weight[batch_id, :, cty_int, ctx_int] = 1\n\n        avg_factor = max(1, center_heatmap_target.eq(1).sum())\n        target_result = dict(\n            center_heatmap_target=center_heatmap_target,\n            wh_target=wh_target,\n            offset_target=offset_target,\n            wh_offset_target_weight=wh_offset_target_weight)\n        return target_result, avg_factor\n\n    @force_fp32(apply_to=('center_heatmap_preds', 'wh_preds', 'offset_preds'))\n    def get_bboxes(self,\n                   center_heatmap_preds,\n                   wh_preds,\n                   offset_preds,\n                   img_metas,\n                   rescale=True,\n                   with_nms=False):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            center_heatmap_preds (list[Tensor]): Center predict heatmaps for\n                all levels with shape (B, num_classes, H, W).\n            wh_preds (list[Tensor]): WH predicts for all levels with\n                shape (B, 2, H, W).\n            offset_preds (list[Tensor]): Offset predicts for all levels\n                with shape (B, 2, H, W).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: True.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is an (n, 5) tensor, where 5 represent\n                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.\n                The shape of the second tensor in the tuple is (n,), and\n                each element represents the class label of the corresponding\n                box.\n        \"\"\"\n        assert len(center_heatmap_preds) == len(wh_preds) == len(\n            offset_preds) == 1\n        result_list = []\n        for img_id in range(len(img_metas)):\n            result_list.append(\n                self._get_bboxes_single(\n                    center_heatmap_preds[0][img_id:img_id + 1, ...],\n                    wh_preds[0][img_id:img_id + 
1, ...],\n                    offset_preds[0][img_id:img_id + 1, ...],\n                    img_metas[img_id],\n                    rescale=rescale,\n                    with_nms=with_nms))\n        return result_list\n\n    def _get_bboxes_single(self,\n                           center_heatmap_pred,\n                           wh_pred,\n                           offset_pred,\n                           img_meta,\n                           rescale=False,\n                           with_nms=True):\n        \"\"\"Transform outputs of a single image into bbox results.\n\n        Args:\n            center_heatmap_pred (Tensor): Center heatmap for current level with\n                shape (1, num_classes, H, W).\n            wh_pred (Tensor): WH heatmap for current level with shape\n                (1, num_classes, H, W).\n            offset_pred (Tensor): Offset for current level with shape\n                (1, corner_offset_channels, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor, Tensor]: The first item is an (n, 5) tensor, where\n                5 represent (tl_x, tl_y, br_x, br_y, score) and the score\n                between 0 and 1. The shape of the second tensor in the tuple\n                is (n,), and each element represents the class label of the\n                corresponding box.\n        \"\"\"\n        batch_det_bboxes, batch_labels = self.decode_heatmap(\n            center_heatmap_pred,\n            wh_pred,\n            offset_pred,\n            img_meta['batch_input_shape'],\n            k=self.test_cfg.topk,\n            kernel=self.test_cfg.local_maximum_kernel)\n\n        det_bboxes = batch_det_bboxes.view([-1, 5])\n        det_labels = batch_labels.view(-1)\n\n        batch_border = det_bboxes.new_tensor(img_meta['border'])[...,\n                                                                 [2, 0, 2, 0]]\n        det_bboxes[..., :4] -= batch_border\n\n        if rescale:\n            det_bboxes[..., :4] /= det_bboxes.new_tensor(\n                img_meta['scale_factor'])\n\n        if with_nms:\n            det_bboxes, det_labels = self._bboxes_nms(det_bboxes, det_labels,\n                                                      self.test_cfg)\n        return det_bboxes, det_labels\n\n    def decode_heatmap(self,\n                       center_heatmap_pred,\n                       wh_pred,\n                       offset_pred,\n                       img_shape,\n                       k=100,\n                       kernel=3):\n        \"\"\"Transform outputs into detections raw bbox prediction.\n\n        Args:\n            center_heatmap_pred (Tensor): center predict heatmap,\n               shape (B, num_classes, H, W).\n            wh_pred (Tensor): wh predict, shape (B, 2, H, W).\n            offset_pred (Tensor): offset predict, shape (B, 2, H, W).\n            img_shape (list[int]): image shape in [h, w] format.\n            k (int): Get top k center keypoints from heatmap. 
Default 100.\n            kernel (int): Max pooling kernel for extract local maximum pixels.\n               Default 3.\n\n        Returns:\n            tuple[torch.Tensor]: Decoded output of CenterNetHead, containing\n               the following Tensors:\n\n              - batch_bboxes (Tensor): Coords of each box with shape (B, k, 5)\n              - batch_topk_labels (Tensor): Categories of each box with \\\n                  shape (B, k)\n        \"\"\"\n        height, width = center_heatmap_pred.shape[2:]\n        inp_h, inp_w = img_shape\n\n        center_heatmap_pred = get_local_maximum(\n            center_heatmap_pred, kernel=kernel)\n\n        *batch_dets, topk_ys, topk_xs = get_topk_from_heatmap(\n            center_heatmap_pred, k=k)\n        batch_scores, batch_index, batch_topk_labels = batch_dets\n\n        wh = transpose_and_gather_feat(wh_pred, batch_index)\n        offset = transpose_and_gather_feat(offset_pred, batch_index)\n        topk_xs = topk_xs + offset[..., 0]\n        topk_ys = topk_ys + offset[..., 1]\n        tl_x = (topk_xs - wh[..., 0] / 2) * (inp_w / width)\n        tl_y = (topk_ys - wh[..., 1] / 2) * (inp_h / height)\n        br_x = (topk_xs + wh[..., 0] / 2) * (inp_w / width)\n        br_y = (topk_ys + wh[..., 1] / 2) * (inp_h / height)\n\n        batch_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], dim=2)\n        batch_bboxes = torch.cat((batch_bboxes, batch_scores[..., None]),\n                                 dim=-1)\n        return batch_bboxes, batch_topk_labels\n\n    def _bboxes_nms(self, bboxes, labels, cfg):\n        if labels.numel() > 0:\n            max_num = cfg.max_per_img\n            bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,\n                                                             -1].contiguous(),\n                                       labels, cfg.nms)\n            if max_num > 0:\n                bboxes = bboxes[:max_num]\n                labels = labels[keep][:max_num]\n\n        return bboxes, labels\n"
  },
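As a quick check of the coordinate mapping in decode_heatmap above: a peak on the heatmap grid is shifted by the predicted sub-pixel offset, expanded by the predicted width/height, and scaled back to the padded input by (inp_w / width, inp_h / height). A small numeric sketch with illustrative sizes (128x128 heatmap, 512x512 input), not values from the file.

import torch

inp_h, inp_w, height, width = 512, 512, 128, 128
topk_xs, topk_ys = torch.tensor([40.0]), torch.tensor([25.0])
offset = torch.tensor([[0.3, 0.7]])   # predicted (x, y) sub-pixel offset
wh = torch.tensor([[12.0, 20.0]])     # predicted width/height in feature-map units

xs = topk_xs + offset[:, 0]
ys = topk_ys + offset[:, 1]
tl_x = (xs - wh[:, 0] / 2) * (inp_w / width)
tl_y = (ys - wh[:, 1] / 2) * (inp_h / height)
br_x = (xs + wh[:, 0] / 2) * (inp_w / width)
br_y = (ys + wh[:, 1] / 2) * (inp_h / height)
# tl_x ~ 137.2, tl_y ~ 62.8, br_x ~ 185.2, br_y ~ 142.8 in input-image pixels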
  {
    "path": "mmdet/models/dense_heads/centripetal_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, normal_init\nfrom mmcv.ops import DeformConv2d\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply\nfrom ..builder import HEADS, build_loss\nfrom .corner_head import CornerHead\n\n\n@HEADS.register_module()\nclass CentripetalHead(CornerHead):\n    \"\"\"Head of CentripetalNet: Pursuing High-quality Keypoint Pairs for Object\n    Detection.\n\n    CentripetalHead inherits from :class:`CornerHead`. It removes the\n    embedding branch and adds guiding shift and centripetal shift branches.\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2003.09119>`_ .\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        num_feat_levels (int): Levels of feature from the previous module. 2\n            for HourglassNet-104 and 1 for HourglassNet-52. HourglassNet-104\n            outputs the final feature and intermediate supervision feature and\n            HourglassNet-52 only outputs the final feature. Default: 2.\n        corner_emb_channels (int): Channel of embedding vector. Default: 1.\n        train_cfg (dict | None): Training config. Useless in CornerHead,\n            but we keep this variable for SingleStageDetector. Default: None.\n        test_cfg (dict | None): Testing config of CornerHead. Default: None.\n        loss_heatmap (dict | None): Config of corner heatmap loss. Default:\n            GaussianFocalLoss.\n        loss_embedding (dict | None): Config of corner embedding loss. Default:\n            AssociativeEmbeddingLoss.\n        loss_offset (dict | None): Config of corner offset loss. Default:\n            SmoothL1Loss.\n        loss_guiding_shift (dict): Config of guiding shift loss. 
Default:\n            SmoothL1Loss.\n        loss_centripetal_shift (dict): Config of centripetal shift loss.\n            Default: SmoothL1Loss.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 centripetal_shift_channels=2,\n                 guiding_shift_channels=2,\n                 feat_adaption_conv_kernel=3,\n                 loss_guiding_shift=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=0.05),\n                 loss_centripetal_shift=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1),\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        assert centripetal_shift_channels == 2, (\n            'CentripetalHead only support centripetal_shift_channels == 2')\n        self.centripetal_shift_channels = centripetal_shift_channels\n        assert guiding_shift_channels == 2, (\n            'CentripetalHead only support guiding_shift_channels == 2')\n        self.guiding_shift_channels = guiding_shift_channels\n        self.feat_adaption_conv_kernel = feat_adaption_conv_kernel\n        super(CentripetalHead, self).__init__(\n            *args, init_cfg=init_cfg, **kwargs)\n        self.loss_guiding_shift = build_loss(loss_guiding_shift)\n        self.loss_centripetal_shift = build_loss(loss_centripetal_shift)\n\n    def _init_centripetal_layers(self):\n        \"\"\"Initialize centripetal layers.\n\n        Including feature adaption deform convs (feat_adaption), deform offset\n        prediction convs (dcn_off), guiding shift (guiding_shift) and\n        centripetal shift ( centripetal_shift). 
Each branch has two parts:\n        prefix `tl_` for top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_feat_adaption = nn.ModuleList()\n        self.br_feat_adaption = nn.ModuleList()\n        self.tl_dcn_offset = nn.ModuleList()\n        self.br_dcn_offset = nn.ModuleList()\n        self.tl_guiding_shift = nn.ModuleList()\n        self.br_guiding_shift = nn.ModuleList()\n        self.tl_centripetal_shift = nn.ModuleList()\n        self.br_centripetal_shift = nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_feat_adaption.append(\n                DeformConv2d(self.in_channels, self.in_channels,\n                             self.feat_adaption_conv_kernel, 1, 1))\n            self.br_feat_adaption.append(\n                DeformConv2d(self.in_channels, self.in_channels,\n                             self.feat_adaption_conv_kernel, 1, 1))\n\n            self.tl_guiding_shift.append(\n                self._make_layers(\n                    out_channels=self.guiding_shift_channels,\n                    in_channels=self.in_channels))\n            self.br_guiding_shift.append(\n                self._make_layers(\n                    out_channels=self.guiding_shift_channels,\n                    in_channels=self.in_channels))\n\n            self.tl_dcn_offset.append(\n                ConvModule(\n                    self.guiding_shift_channels,\n                    self.feat_adaption_conv_kernel**2 *\n                    self.guiding_shift_channels,\n                    1,\n                    bias=False,\n                    act_cfg=None))\n            self.br_dcn_offset.append(\n                ConvModule(\n                    self.guiding_shift_channels,\n                    self.feat_adaption_conv_kernel**2 *\n                    self.guiding_shift_channels,\n                    1,\n                    bias=False,\n                    act_cfg=None))\n\n            self.tl_centripetal_shift.append(\n                self._make_layers(\n                    out_channels=self.centripetal_shift_channels,\n                    in_channels=self.in_channels))\n            self.br_centripetal_shift.append(\n                self._make_layers(\n                    out_channels=self.centripetal_shift_channels,\n                    in_channels=self.in_channels))\n\n    def _init_layers(self):\n        \"\"\"Initialize layers for CentripetalHead.\n\n        Including two parts: CornerHead layers and CentripetalHead layers\n        \"\"\"\n        super()._init_layers()  # using _init_layers in CornerHead\n        self._init_centripetal_layers()\n\n    def init_weights(self):\n        super(CentripetalHead, self).init_weights()\n        for i in range(self.num_feat_levels):\n            normal_init(self.tl_feat_adaption[i], std=0.01)\n            normal_init(self.br_feat_adaption[i], std=0.01)\n            normal_init(self.tl_dcn_offset[i].conv, std=0.1)\n            normal_init(self.br_dcn_offset[i].conv, std=0.1)\n            _ = [x.conv.reset_parameters() for x in self.tl_guiding_shift[i]]\n            _ = [x.conv.reset_parameters() for x in self.br_guiding_shift[i]]\n            _ = [\n                x.conv.reset_parameters() for x in self.tl_centripetal_shift[i]\n            ]\n            _ = [\n                x.conv.reset_parameters() for x in self.br_centripetal_shift[i]\n            ]\n\n    def forward_single(self, x, lvl_ind):\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): Feature of a single 
level.\n            lvl_ind (int): Level index of current feature.\n\n        Returns:\n            tuple[Tensor]: A tuple of CentripetalHead's output for current\n            feature level. Containing the following Tensors:\n\n                - tl_heat (Tensor): Predicted top-left corner heatmap.\n                - br_heat (Tensor): Predicted bottom-right corner heatmap.\n                - tl_off (Tensor): Predicted top-left offset heatmap.\n                - br_off (Tensor): Predicted bottom-right offset heatmap.\n                - tl_guiding_shift (Tensor): Predicted top-left guiding shift\n                  heatmap.\n                - br_guiding_shift (Tensor): Predicted bottom-right guiding\n                  shift heatmap.\n                - tl_centripetal_shift (Tensor): Predicted top-left centripetal\n                  shift heatmap.\n                - br_centripetal_shift (Tensor): Predicted bottom-right\n                  centripetal shift heatmap.\n        \"\"\"\n        tl_heat, br_heat, _, _, tl_off, br_off, tl_pool, br_pool = super(\n        ).forward_single(\n            x, lvl_ind, return_pool=True)\n\n        tl_guiding_shift = self.tl_guiding_shift[lvl_ind](tl_pool)\n        br_guiding_shift = self.br_guiding_shift[lvl_ind](br_pool)\n\n        tl_dcn_offset = self.tl_dcn_offset[lvl_ind](tl_guiding_shift.detach())\n        br_dcn_offset = self.br_dcn_offset[lvl_ind](br_guiding_shift.detach())\n\n        tl_feat_adaption = self.tl_feat_adaption[lvl_ind](tl_pool,\n                                                          tl_dcn_offset)\n        br_feat_adaption = self.br_feat_adaption[lvl_ind](br_pool,\n                                                          br_dcn_offset)\n\n        tl_centripetal_shift = self.tl_centripetal_shift[lvl_ind](\n            tl_feat_adaption)\n        br_centripetal_shift = self.br_centripetal_shift[lvl_ind](\n            br_feat_adaption)\n\n        result_list = [\n            tl_heat, br_heat, tl_off, br_off, tl_guiding_shift,\n            br_guiding_shift, tl_centripetal_shift, br_centripetal_shift\n        ]\n        return result_list\n\n    @force_fp32()\n    def loss(self,\n             tl_heats,\n             br_heats,\n             tl_offs,\n             br_offs,\n             tl_guiding_shifts,\n             br_guiding_shifts,\n             tl_centripetal_shifts,\n             br_centripetal_shifts,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each\n                level with shape (N, guiding_shift_channels, H, W).\n            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for\n                each level with shape (N, guiding_shift_channels, H, W).\n            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts\n                for each level with shape (N, 
centripetal_shift_channels, H,\n                W).\n            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal\n                shifts for each level with shape (N,\n                centripetal_shift_channels, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [left, top, right, bottom] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components. Containing the\n            following losses:\n\n                - det_loss (list[Tensor]): Corner keypoint losses of all\n                  feature levels.\n                - off_loss (list[Tensor]): Corner offset losses of all feature\n                  levels.\n                - guiding_loss (list[Tensor]): Guiding shift losses of all\n                  feature levels.\n                - centripetal_loss (list[Tensor]): Centripetal shift losses of\n                  all feature levels.\n        \"\"\"\n        targets = self.get_targets(\n            gt_bboxes,\n            gt_labels,\n            tl_heats[-1].shape,\n            img_metas[0]['pad_shape'],\n            with_corner_emb=self.with_corner_emb,\n            with_guiding_shift=True,\n            with_centripetal_shift=True)\n        mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n        [det_losses, off_losses, guiding_losses, centripetal_losses\n         ] = multi_apply(self.loss_single, tl_heats, br_heats, tl_offs,\n                         br_offs, tl_guiding_shifts, br_guiding_shifts,\n                         tl_centripetal_shifts, br_centripetal_shifts,\n                         mlvl_targets)\n        loss_dict = dict(\n            det_loss=det_losses,\n            off_loss=off_losses,\n            guiding_loss=guiding_losses,\n            centripetal_loss=centripetal_losses)\n        return loss_dict\n\n    def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,\n                    br_guiding_shift, tl_centripetal_shift,\n                    br_centripetal_shift, targets):\n        \"\"\"Compute losses for single level.\n\n        Args:\n            tl_hmp (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_hmp (Tensor): Bottom-right corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            tl_guiding_shift (Tensor): Top-left guiding shift for current level\n                with shape (N, guiding_shift_channels, H, W).\n            br_guiding_shift (Tensor): Bottom-right guiding shift for current\n                level with shape (N, guiding_shift_channels, H, W).\n            tl_centripetal_shift (Tensor): Top-left centripetal shift for\n                current level with shape (N, centripetal_shift_channels, H, W).\n            br_centripetal_shift (Tensor): Bottom-right centripetal shift for\n                
current level with shape (N, centripetal_shift_channels, H, W).\n            targets (dict): Corner target generated by `get_targets`.\n\n        Returns:\n            tuple[torch.Tensor]: Losses of the head's different branches\n            containing the following losses:\n\n                - det_loss (Tensor): Corner keypoint loss.\n                - off_loss (Tensor): Corner offset loss.\n                - guiding_loss (Tensor): Guiding shift loss.\n                - centripetal_loss (Tensor): Centripetal shift loss.\n        \"\"\"\n        targets['corner_embedding'] = None\n\n        det_loss, _, _, off_loss = super().loss_single(tl_hmp, br_hmp, None,\n                                                       None, tl_off, br_off,\n                                                       targets)\n\n        gt_tl_guiding_shift = targets['topleft_guiding_shift']\n        gt_br_guiding_shift = targets['bottomright_guiding_shift']\n        gt_tl_centripetal_shift = targets['topleft_centripetal_shift']\n        gt_br_centripetal_shift = targets['bottomright_centripetal_shift']\n\n        gt_tl_heatmap = targets['topleft_heatmap']\n        gt_br_heatmap = targets['bottomright_heatmap']\n        # We only compute the offset loss at the real corner position.\n        # The value of real corner would be 1 in heatmap ground truth.\n        # The mask is computed in class agnostic mode and its shape is\n        # batch * 1 * width * height.\n        tl_mask = gt_tl_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_tl_heatmap)\n        br_mask = gt_br_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_br_heatmap)\n\n        # Guiding shift loss\n        tl_guiding_loss = self.loss_guiding_shift(\n            tl_guiding_shift,\n            gt_tl_guiding_shift,\n            tl_mask,\n            avg_factor=tl_mask.sum())\n        br_guiding_loss = self.loss_guiding_shift(\n            br_guiding_shift,\n            gt_br_guiding_shift,\n            br_mask,\n            avg_factor=br_mask.sum())\n        guiding_loss = (tl_guiding_loss + br_guiding_loss) / 2.0\n        # Centripetal shift loss\n        tl_centripetal_loss = self.loss_centripetal_shift(\n            tl_centripetal_shift,\n            gt_tl_centripetal_shift,\n            tl_mask,\n            avg_factor=tl_mask.sum())\n        br_centripetal_loss = self.loss_centripetal_shift(\n            br_centripetal_shift,\n            gt_br_centripetal_shift,\n            br_mask,\n            avg_factor=br_mask.sum())\n        centripetal_loss = (tl_centripetal_loss + br_centripetal_loss) / 2.0\n\n        return det_loss, off_loss, guiding_loss, centripetal_loss\n\n    @force_fp32()\n    def get_bboxes(self,\n                   tl_heats,\n                   br_heats,\n                   tl_offs,\n                   br_offs,\n                   tl_guiding_shifts,\n                   br_guiding_shifts,\n                   tl_centripetal_shifts,\n                   br_centripetal_shifts,\n                   img_metas,\n                   rescale=False,\n                   with_nms=True):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n       
         with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            tl_guiding_shifts (list[Tensor]): Top-left guiding shifts for each\n                level with shape (N, guiding_shift_channels, H, W). Useless in\n                this function, we keep this arg because it's the raw output\n                from CentripetalHead.\n            br_guiding_shifts (list[Tensor]): Bottom-right guiding shifts for\n                each level with shape (N, guiding_shift_channels, H, W).\n                Useless in this function, we keep this arg because it's the\n                raw output from CentripetalHead.\n            tl_centripetal_shifts (list[Tensor]): Top-left centripetal shifts\n                for each level with shape (N, centripetal_shift_channels, H,\n                W).\n            br_centripetal_shifts (list[Tensor]): Bottom-right centripetal\n                shifts for each level with shape (N,\n                centripetal_shift_channels, H, W).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n        \"\"\"\n        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)\n        result_list = []\n        for img_id in range(len(img_metas)):\n            result_list.append(\n                self._get_bboxes_single(\n                    tl_heats[-1][img_id:img_id + 1, :],\n                    br_heats[-1][img_id:img_id + 1, :],\n                    tl_offs[-1][img_id:img_id + 1, :],\n                    br_offs[-1][img_id:img_id + 1, :],\n                    img_metas[img_id],\n                    tl_emb=None,\n                    br_emb=None,\n                    tl_centripetal_shift=tl_centripetal_shifts[-1][\n                        img_id:img_id + 1, :],\n                    br_centripetal_shift=br_centripetal_shifts[-1][\n                        img_id:img_id + 1, :],\n                    rescale=rescale,\n                    with_nms=with_nms))\n\n        return result_list\n"
  },
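The guiding-shift and centripetal-shift losses in loss_single above are averaged only over real corner cells, selected with a class-agnostic mask built from the ground-truth heatmap. A minimal sketch of that mask computation; shapes and values are illustrative.

import torch

gt_heatmap = torch.zeros(1, 3, 4, 4)   # (N, num_classes, H, W)
gt_heatmap[0, 1, 2, 3] = 1.0           # real corner of a class-1 object
gt_heatmap[0, 1, 2, 2] = 0.6           # gaussian tail, not a real corner
# A cell counts as a real corner iff any class channel equals exactly 1.
mask = gt_heatmap.eq(1).sum(1).gt(0).unsqueeze(1).type_as(gt_heatmap)
assert mask.shape == (1, 1, 4, 4)
assert mask[0, 0, 2, 3] == 1 and mask[0, 0, 2, 2] == 0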
  {
    "path": "mmdet/models/dense_heads/corner_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom logging import warning\nfrom math import ceil, log\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, bias_init_with_prob\nfrom mmcv.ops import CornerPool, batched_nms\nfrom mmcv.runner import BaseModule, force_fp32\n\nfrom mmdet.core import multi_apply\nfrom ..builder import HEADS, build_loss\nfrom ..utils import gaussian_radius, gen_gaussian_target\nfrom ..utils.gaussian_target import (gather_feat, get_local_maximum,\n                                     get_topk_from_heatmap,\n                                     transpose_and_gather_feat)\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\nclass BiCornerPool(BaseModule):\n    \"\"\"Bidirectional Corner Pooling Module (TopLeft, BottomRight, etc.)\n\n    Args:\n        in_channels (int): Input channels of module.\n        out_channels (int): Output channels of module.\n        feat_channels (int): Feature channels of module.\n        directions (list[str]): Directions of two CornerPools.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 directions,\n                 feat_channels=128,\n                 out_channels=128,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 init_cfg=None):\n        super(BiCornerPool, self).__init__(init_cfg)\n        self.direction1_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n        self.direction2_conv = ConvModule(\n            in_channels, feat_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.aftpool_conv = ConvModule(\n            feat_channels,\n            out_channels,\n            3,\n            padding=1,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.conv1 = ConvModule(\n            in_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n        self.conv2 = ConvModule(\n            in_channels, out_channels, 3, padding=1, norm_cfg=norm_cfg)\n\n        self.direction1_pool = CornerPool(directions[0])\n        self.direction2_pool = CornerPool(directions[1])\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            x (tensor): Input feature of BiCornerPool.\n\n        Returns:\n            conv2 (tensor): Output feature of BiCornerPool.\n        \"\"\"\n        direction1_conv = self.direction1_conv(x)\n        direction2_conv = self.direction2_conv(x)\n        direction1_feat = self.direction1_pool(direction1_conv)\n        direction2_feat = self.direction2_pool(direction2_conv)\n        aftpool_conv = self.aftpool_conv(direction1_feat + direction2_feat)\n        conv1 = self.conv1(x)\n        relu = self.relu(aftpool_conv + conv1)\n        conv2 = self.conv2(relu)\n        return conv2\n\n\n@HEADS.register_module()\nclass CornerHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"Head of CornerNet: Detecting Objects as Paired Keypoints.\n\n    Code is modified from the `official github repo\n    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/\n    kp.py#L73>`_ .\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1808.01244>`_ .\n\n    Args:\n        num_classes (int): Number of 
categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        num_feat_levels (int): Levels of feature from the previous module. 2\n            for HourglassNet-104 and 1 for HourglassNet-52. Because\n            HourglassNet-104 outputs the final feature and intermediate\n            supervision feature and HourglassNet-52 only outputs the final\n            feature. Default: 2.\n        corner_emb_channels (int): Channel of embedding vector. Default: 1.\n        train_cfg (dict | None): Training config. Useless in CornerHead,\n            but we keep this variable for SingleStageDetector. Default: None.\n        test_cfg (dict | None): Testing config of CornerHead. Default: None.\n        loss_heatmap (dict | None): Config of corner heatmap loss. Default:\n            GaussianFocalLoss.\n        loss_embedding (dict | None): Config of corner embedding loss. Default:\n            AssociativeEmbeddingLoss.\n        loss_offset (dict | None): Config of corner offset loss. Default:\n            SmoothL1Loss.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 num_feat_levels=2,\n                 corner_emb_channels=1,\n                 train_cfg=None,\n                 test_cfg=None,\n                 loss_heatmap=dict(\n                     type='GaussianFocalLoss',\n                     alpha=2.0,\n                     gamma=4.0,\n                     loss_weight=1),\n                 loss_embedding=dict(\n                     type='AssociativeEmbeddingLoss',\n                     pull_weight=0.25,\n                     push_weight=0.25),\n                 loss_offset=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1),\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(CornerHead, self).__init__(init_cfg)\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.corner_emb_channels = corner_emb_channels\n        self.with_corner_emb = self.corner_emb_channels > 0\n        self.corner_offset_channels = 2\n        self.num_feat_levels = num_feat_levels\n        self.loss_heatmap = build_loss(\n            loss_heatmap) if loss_heatmap is not None else None\n        self.loss_embedding = build_loss(\n            loss_embedding) if loss_embedding is not None else None\n        self.loss_offset = build_loss(\n            loss_offset) if loss_offset is not None else None\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        self.fp16_enabled = False\n        self._init_layers()\n\n    def _make_layers(self, out_channels, in_channels=256, feat_channels=256):\n        \"\"\"Initialize conv sequential for CornerHead.\"\"\"\n        return nn.Sequential(\n            ConvModule(in_channels, feat_channels, 3, padding=1),\n            ConvModule(\n                feat_channels, out_channels, 1, norm_cfg=None, act_cfg=None))\n\n    def _init_corner_kpt_layers(self):\n        \"\"\"Initialize corner keypoint layers.\n\n        Including corner heatmap branch and corner offset branch. 
Each branch\n        has two parts: prefix `tl_` for top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_pool, self.br_pool = nn.ModuleList(), nn.ModuleList()\n        self.tl_heat, self.br_heat = nn.ModuleList(), nn.ModuleList()\n        self.tl_off, self.br_off = nn.ModuleList(), nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_pool.append(\n                BiCornerPool(\n                    self.in_channels, ['top', 'left'],\n                    out_channels=self.in_channels))\n            self.br_pool.append(\n                BiCornerPool(\n                    self.in_channels, ['bottom', 'right'],\n                    out_channels=self.in_channels))\n\n            self.tl_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n            self.br_heat.append(\n                self._make_layers(\n                    out_channels=self.num_classes,\n                    in_channels=self.in_channels))\n\n            self.tl_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n            self.br_off.append(\n                self._make_layers(\n                    out_channels=self.corner_offset_channels,\n                    in_channels=self.in_channels))\n\n    def _init_corner_emb_layers(self):\n        \"\"\"Initialize corner embedding layers.\n\n        Only include corner embedding branch with two parts: prefix `tl_` for\n        top-left and `br_` for bottom-right.\n        \"\"\"\n        self.tl_emb, self.br_emb = nn.ModuleList(), nn.ModuleList()\n\n        for _ in range(self.num_feat_levels):\n            self.tl_emb.append(\n                self._make_layers(\n                    out_channels=self.corner_emb_channels,\n                    in_channels=self.in_channels))\n            self.br_emb.append(\n                self._make_layers(\n                    out_channels=self.corner_emb_channels,\n                    in_channels=self.in_channels))\n\n    def _init_layers(self):\n        \"\"\"Initialize layers for CornerHead.\n\n        Including two parts: corner keypoint layers and corner embedding layers\n        \"\"\"\n        self._init_corner_kpt_layers()\n        if self.with_corner_emb:\n            self._init_corner_emb_layers()\n\n    def init_weights(self):\n        super(CornerHead, self).init_weights()\n        bias_init = bias_init_with_prob(0.1)\n        for i in range(self.num_feat_levels):\n            # The initialization of parameters are different between\n            # nn.Conv2d and ConvModule. 
Our experiments show that\n            # using the original initialization of nn.Conv2d increases\n            # the final mAP by about 0.2%\n            self.tl_heat[i][-1].conv.reset_parameters()\n            self.tl_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.br_heat[i][-1].conv.reset_parameters()\n            self.br_heat[i][-1].conv.bias.data.fill_(bias_init)\n            self.tl_off[i][-1].conv.reset_parameters()\n            self.br_off[i][-1].conv.reset_parameters()\n            if self.with_corner_emb:\n                self.tl_emb[i][-1].conv.reset_parameters()\n                self.br_emb[i][-1].conv.reset_parameters()\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of corner heatmaps, offset heatmaps and\n            embedding heatmaps.\n                - tl_heats (list[Tensor]): Top-left corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - br_heats (list[Tensor]): Bottom-right corner heatmaps for all\n                  levels, each is a 4D-tensor, the channels number is\n                  num_classes.\n                - tl_embs (list[Tensor] | list[None]): Top-left embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - br_embs (list[Tensor] | list[None]): Bottom-right embedding\n                  heatmaps for all levels, each is a 4D-tensor or None.\n                  If not None, the channels number is corner_emb_channels.\n                - tl_offs (list[Tensor]): Top-left offset heatmaps for all\n                  levels, each is a 4D-tensor. The channels number is\n                  corner_offset_channels.\n                - br_offs (list[Tensor]): Bottom-right offset heatmaps for all\n                  levels, each is a 4D-tensor. The channels number is\n                  corner_offset_channels.\n        \"\"\"\n        lvl_ind = list(range(self.num_feat_levels))\n        return multi_apply(self.forward_single, feats, lvl_ind)\n\n    def forward_single(self, x, lvl_ind, return_pool=False):\n        \"\"\"Forward feature of a single level.\n\n        Args:\n            x (Tensor): Feature of a single level.\n            lvl_ind (int): Level index of current feature.\n            return_pool (bool): Return corner pool feature or not.\n\n        Returns:\n            tuple[Tensor]: A tuple of CornerHead's output for current feature\n            level. Containing the following Tensors:\n\n                - tl_heat (Tensor): Predicted top-left corner heatmap.\n                - br_heat (Tensor): Predicted bottom-right corner heatmap.\n                - tl_emb (Tensor | None): Predicted top-left embedding heatmap.\n                  None for `self.with_corner_emb == False`.\n                - br_emb (Tensor | None): Predicted bottom-right embedding\n                  heatmap. None for `self.with_corner_emb == False`.\n                - tl_off (Tensor): Predicted top-left offset heatmap.\n                - br_off (Tensor): Predicted bottom-right offset heatmap.\n                - tl_pool (Tensor): Top-left corner pool feature. 
Optional.\n                - br_pool (Tensor): Bottom-right corner pool feature.\n                  Optional.\n        \"\"\"\n        tl_pool = self.tl_pool[lvl_ind](x)\n        tl_heat = self.tl_heat[lvl_ind](tl_pool)\n        br_pool = self.br_pool[lvl_ind](x)\n        br_heat = self.br_heat[lvl_ind](br_pool)\n\n        tl_emb, br_emb = None, None\n        if self.with_corner_emb:\n            tl_emb = self.tl_emb[lvl_ind](tl_pool)\n            br_emb = self.br_emb[lvl_ind](br_pool)\n\n        tl_off = self.tl_off[lvl_ind](tl_pool)\n        br_off = self.br_off[lvl_ind](br_pool)\n\n        result_list = [tl_heat, br_heat, tl_emb, br_emb, tl_off, br_off]\n        if return_pool:\n            result_list.append(tl_pool)\n            result_list.append(br_pool)\n\n        return result_list\n\n    def get_targets(self,\n                    gt_bboxes,\n                    gt_labels,\n                    feat_shape,\n                    img_shape,\n                    with_corner_emb=False,\n                    with_guiding_shift=False,\n                    with_centripetal_shift=False):\n        \"\"\"Generate corner targets.\n\n        Including corner heatmap, corner offset.\n\n        Optional: corner embedding, corner guiding shift, centripetal shift.\n\n        For CornerNet, we generate corner heatmap, corner offset and corner\n        embedding from this function.\n\n        For CentripetalNet, we generate corner heatmap, corner offset, guiding\n        shift and centripetal shift from this function.\n\n        Args:\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image, each\n                has shape (num_gt, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each box, each has\n                shape (num_gt,).\n            feat_shape (list[int]): Shape of output feature,\n                [batch, channel, height, width].\n            img_shape (list[int]): Shape of input image,\n                [height, width, channel].\n            with_corner_emb (bool): Generate corner embedding target or not.\n                Default: False.\n            with_guiding_shift (bool): Generate guiding shift target or not.\n                Default: False.\n            with_centripetal_shift (bool): Generate centripetal shift target or\n                not. Default: False.\n\n        Returns:\n            dict: Ground truth of corner heatmap, corner offset, corner\n            embedding, guiding shift and centripetal shift. Containing the\n            following keys:\n\n                - topleft_heatmap (Tensor): Ground truth top-left corner\n                  heatmap.\n                - bottomright_heatmap (Tensor): Ground truth bottom-right\n                  corner heatmap.\n                - topleft_offset (Tensor): Ground truth top-left corner offset.\n                - bottomright_offset (Tensor): Ground truth bottom-right corner\n                  offset.\n                - corner_embedding (list[list[list[int]]]): Ground truth corner\n                  embedding. Optional.\n                - topleft_guiding_shift (Tensor): Ground truth top-left corner\n                  guiding shift. Optional.\n                - bottomright_guiding_shift (Tensor): Ground truth bottom-right\n                  corner guiding shift. Optional.\n                - topleft_centripetal_shift (Tensor): Ground truth top-left\n                  corner centripetal shift. 
Optional.\n                - bottomright_centripetal_shift (Tensor): Ground truth\n                  bottom-right corner centripetal shift. Optional.\n        \"\"\"\n        batch_size, _, height, width = feat_shape\n        img_h, img_w = img_shape[:2]\n\n        width_ratio = float(width / img_w)\n        height_ratio = float(height / img_h)\n\n        gt_tl_heatmap = gt_bboxes[-1].new_zeros(\n            [batch_size, self.num_classes, height, width])\n        gt_br_heatmap = gt_bboxes[-1].new_zeros(\n            [batch_size, self.num_classes, height, width])\n        gt_tl_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n        gt_br_offset = gt_bboxes[-1].new_zeros([batch_size, 2, height, width])\n\n        if with_corner_emb:\n            match = []\n\n        # Guiding shift is a kind of offset, from center to corner\n        if with_guiding_shift:\n            gt_tl_guiding_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n            gt_br_guiding_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n        # Centripetal shift is also a kind of offset, from center to corner\n        # and normalized by log.\n        if with_centripetal_shift:\n            gt_tl_centripetal_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n            gt_br_centripetal_shift = gt_bboxes[-1].new_zeros(\n                [batch_size, 2, height, width])\n\n        for batch_id in range(batch_size):\n            # Ground truth of corner embedding per image is a list of coord set\n            corner_match = []\n            for box_id in range(len(gt_labels[batch_id])):\n                left, top, right, bottom = gt_bboxes[batch_id][box_id]\n                center_x = (left + right) / 2.0\n                center_y = (top + bottom) / 2.0\n                label = gt_labels[batch_id][box_id]\n\n                # Use coords in the feature level to generate ground truth\n                scale_left = left * width_ratio\n                scale_right = right * width_ratio\n                scale_top = top * height_ratio\n                scale_bottom = bottom * height_ratio\n                scale_center_x = center_x * width_ratio\n                scale_center_y = center_y * height_ratio\n\n                # Int coords on feature map/ground truth tensor\n                left_idx = int(min(scale_left, width - 1))\n                right_idx = int(min(scale_right, width - 1))\n                top_idx = int(min(scale_top, height - 1))\n                bottom_idx = int(min(scale_bottom, height - 1))\n\n                # Generate gaussian heatmap\n                scale_box_width = ceil(scale_right - scale_left)\n                scale_box_height = ceil(scale_bottom - scale_top)\n                radius = gaussian_radius((scale_box_height, scale_box_width),\n                                         min_overlap=0.3)\n                radius = max(0, int(radius))\n                gt_tl_heatmap[batch_id, label] = gen_gaussian_target(\n                    gt_tl_heatmap[batch_id, label], [left_idx, top_idx],\n                    radius)\n                gt_br_heatmap[batch_id, label] = gen_gaussian_target(\n                    gt_br_heatmap[batch_id, label], [right_idx, bottom_idx],\n                    radius)\n\n                # Generate corner offset\n                left_offset = scale_left - left_idx\n                top_offset = scale_top - top_idx\n                right_offset = scale_right - 
right_idx\n                bottom_offset = scale_bottom - bottom_idx\n                gt_tl_offset[batch_id, 0, top_idx, left_idx] = left_offset\n                gt_tl_offset[batch_id, 1, top_idx, left_idx] = top_offset\n                gt_br_offset[batch_id, 0, bottom_idx, right_idx] = right_offset\n                gt_br_offset[batch_id, 1, bottom_idx,\n                             right_idx] = bottom_offset\n\n                # Generate corner embedding\n                if with_corner_emb:\n                    corner_match.append([[top_idx, left_idx],\n                                         [bottom_idx, right_idx]])\n                # Generate guiding shift\n                if with_guiding_shift:\n                    gt_tl_guiding_shift[batch_id, 0, top_idx,\n                                        left_idx] = scale_center_x - left_idx\n                    gt_tl_guiding_shift[batch_id, 1, top_idx,\n                                        left_idx] = scale_center_y - top_idx\n                    gt_br_guiding_shift[batch_id, 0, bottom_idx,\n                                        right_idx] = right_idx - scale_center_x\n                    gt_br_guiding_shift[\n                        batch_id, 1, bottom_idx,\n                        right_idx] = bottom_idx - scale_center_y\n                # Generate centripetal shift\n                if with_centripetal_shift:\n                    gt_tl_centripetal_shift[batch_id, 0, top_idx,\n                                            left_idx] = log(scale_center_x -\n                                                            scale_left)\n                    gt_tl_centripetal_shift[batch_id, 1, top_idx,\n                                            left_idx] = log(scale_center_y -\n                                                            scale_top)\n                    gt_br_centripetal_shift[batch_id, 0, bottom_idx,\n                                            right_idx] = log(scale_right -\n                                                             scale_center_x)\n                    gt_br_centripetal_shift[batch_id, 1, bottom_idx,\n                                            right_idx] = log(scale_bottom -\n                                                             scale_center_y)\n\n            if with_corner_emb:\n                match.append(corner_match)\n\n        target_result = dict(\n            topleft_heatmap=gt_tl_heatmap,\n            topleft_offset=gt_tl_offset,\n            bottomright_heatmap=gt_br_heatmap,\n            bottomright_offset=gt_br_offset)\n\n        if with_corner_emb:\n            target_result.update(corner_embedding=match)\n        if with_guiding_shift:\n            target_result.update(\n                topleft_guiding_shift=gt_tl_guiding_shift,\n                bottomright_guiding_shift=gt_br_guiding_shift)\n        if with_centripetal_shift:\n            target_result.update(\n                topleft_centripetal_shift=gt_tl_centripetal_shift,\n                bottomright_centripetal_shift=gt_br_centripetal_shift)\n\n        return target_result\n\n    @force_fp32()\n    def loss(self,\n             tl_heats,\n             br_heats,\n             tl_embs,\n             br_embs,\n             tl_offs,\n             br_offs,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, 
num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [left, top, right, bottom] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components. Containing the\n            following losses:\n\n                - det_loss (list[Tensor]): Corner keypoint losses of all\n                  feature levels.\n                - pull_loss (list[Tensor]): Part one of AssociativeEmbedding\n                  losses of all feature levels.\n                - push_loss (list[Tensor]): Part two of AssociativeEmbedding\n                  losses of all feature levels.\n                - off_loss (list[Tensor]): Corner offset losses of all feature\n                  levels.\n        \"\"\"\n        targets = self.get_targets(\n            gt_bboxes,\n            gt_labels,\n            tl_heats[-1].shape,\n            img_metas[0]['pad_shape'],\n            with_corner_emb=self.with_corner_emb)\n        mlvl_targets = [targets for _ in range(self.num_feat_levels)]\n        det_losses, pull_losses, push_losses, off_losses = multi_apply(\n            self.loss_single, tl_heats, br_heats, tl_embs, br_embs, tl_offs,\n            br_offs, mlvl_targets)\n        loss_dict = dict(det_loss=det_losses, off_loss=off_losses)\n        if self.with_corner_emb:\n            loss_dict.update(pull_loss=pull_losses, push_loss=push_losses)\n        return loss_dict\n\n    def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,\n                    targets):\n        \"\"\"Compute losses for single level.\n\n        Args:\n            tl_hmp (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_hmp (Tensor): Bottom-right corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            tl_emb (Tensor): Top-left corner embedding for current level with\n                shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor): Bottom-right corner embedding for current level\n                with shape (N, corner_emb_channels, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            targets (dict): Corner target generated by `get_targets`.\n\n       
 Returns:\n            tuple[torch.Tensor]: Losses of the head's different branches\n            containing the following losses:\n\n                - det_loss (Tensor): Corner keypoint loss.\n                - pull_loss (Tensor): Part one of AssociativeEmbedding loss.\n                - push_loss (Tensor): Part two of AssociativeEmbedding loss.\n                - off_loss (Tensor): Corner offset loss.\n        \"\"\"\n        gt_tl_hmp = targets['topleft_heatmap']\n        gt_br_hmp = targets['bottomright_heatmap']\n        gt_tl_off = targets['topleft_offset']\n        gt_br_off = targets['bottomright_offset']\n        gt_embedding = targets['corner_embedding']\n\n        # Detection loss\n        tl_det_loss = self.loss_heatmap(\n            tl_hmp.sigmoid(),\n            gt_tl_hmp,\n            avg_factor=max(1,\n                           gt_tl_hmp.eq(1).sum()))\n        br_det_loss = self.loss_heatmap(\n            br_hmp.sigmoid(),\n            gt_br_hmp,\n            avg_factor=max(1,\n                           gt_br_hmp.eq(1).sum()))\n        det_loss = (tl_det_loss + br_det_loss) / 2.0\n\n        # AssociativeEmbedding loss\n        if self.with_corner_emb and self.loss_embedding is not None:\n            pull_loss, push_loss = self.loss_embedding(tl_emb, br_emb,\n                                                       gt_embedding)\n        else:\n            pull_loss, push_loss = None, None\n\n        # Offset loss\n        # We only compute the offset loss at the real corner position.\n        # The value of real corner would be 1 in heatmap ground truth.\n        # The mask is computed in class agnostic mode and its shape is\n        # batch * 1 * width * height.\n        tl_off_mask = gt_tl_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_tl_hmp)\n        br_off_mask = gt_br_hmp.eq(1).sum(1).gt(0).unsqueeze(1).type_as(\n            gt_br_hmp)\n        tl_off_loss = self.loss_offset(\n            tl_off,\n            gt_tl_off,\n            tl_off_mask,\n            avg_factor=max(1, tl_off_mask.sum()))\n        br_off_loss = self.loss_offset(\n            br_off,\n            gt_br_off,\n            br_off_mask,\n            avg_factor=max(1, br_off_mask.sum()))\n\n        off_loss = (tl_off_loss + br_off_loss) / 2.0\n\n        return det_loss, pull_loss, push_loss, off_loss\n\n    @force_fp32()\n    def get_bboxes(self,\n                   tl_heats,\n                   br_heats,\n                   tl_embs,\n                   br_embs,\n                   tl_offs,\n                   br_offs,\n                   img_metas,\n                   rescale=False,\n                   with_nms=True):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape 
(N, corner_offset_channels, H, W).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n        \"\"\"\n        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(img_metas)\n        result_list = []\n        for img_id in range(len(img_metas)):\n            result_list.append(\n                self._get_bboxes_single(\n                    tl_heats[-1][img_id:img_id + 1, :],\n                    br_heats[-1][img_id:img_id + 1, :],\n                    tl_offs[-1][img_id:img_id + 1, :],\n                    br_offs[-1][img_id:img_id + 1, :],\n                    img_metas[img_id],\n                    tl_emb=tl_embs[-1][img_id:img_id + 1, :],\n                    br_emb=br_embs[-1][img_id:img_id + 1, :],\n                    rescale=rescale,\n                    with_nms=with_nms))\n\n        return result_list\n\n    def _get_bboxes_single(self,\n                           tl_heat,\n                           br_heat,\n                           tl_off,\n                           br_off,\n                           img_meta,\n                           tl_emb=None,\n                           br_emb=None,\n                           tl_centripetal_shift=None,\n                           br_centripetal_shift=None,\n                           rescale=False,\n                           with_nms=True):\n        \"\"\"Transform outputs for a single batch item into bbox predictions.\n\n        Args:\n            tl_heat (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_heat (Tensor): Bottom-right corner heatmap for current level\n                with shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            tl_emb (Tensor): Top-left corner embedding for current level with\n                shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor): Bottom-right corner embedding for current level\n                with shape (N, corner_emb_channels, H, W).\n            tl_centripetal_shift: Top-left corner's centripetal shift for\n                current level with shape (N, 2, H, W).\n            br_centripetal_shift: Bottom-right corner's centripetal shift for\n                current level with shape (N, 2, H, W).\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n        \"\"\"\n        if isinstance(img_meta, (list, tuple)):\n            img_meta = img_meta[0]\n\n        batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(\n            tl_heat=tl_heat.sigmoid(),\n            br_heat=br_heat.sigmoid(),\n            tl_off=tl_off,\n            br_off=br_off,\n            tl_emb=tl_emb,\n            br_emb=br_emb,\n            tl_centripetal_shift=tl_centripetal_shift,\n            
br_centripetal_shift=br_centripetal_shift,\n            img_meta=img_meta,\n            k=self.test_cfg.corner_topk,\n            kernel=self.test_cfg.local_maximum_kernel,\n            distance_threshold=self.test_cfg.distance_threshold)\n\n        if rescale:\n            batch_bboxes /= batch_bboxes.new_tensor(img_meta['scale_factor'])\n\n        bboxes = batch_bboxes.view([-1, 4])\n        scores = batch_scores.view(-1)\n        clses = batch_clses.view(-1)\n\n        detections = torch.cat([bboxes, scores.unsqueeze(-1)], -1)\n        keepinds = (detections[:, -1] > -0.1)\n        detections = detections[keepinds]\n        labels = clses[keepinds]\n\n        if with_nms:\n            detections, labels = self._bboxes_nms(detections, labels,\n                                                  self.test_cfg)\n\n        return detections, labels\n\n    def _bboxes_nms(self, bboxes, labels, cfg):\n        if 'nms_cfg' in cfg:\n            # NOTE: assumes the stdlib `warnings` module is imported at the\n            # top of this file.\n            warnings.warn('nms_cfg in test_cfg will be deprecated. '\n                          'Please rename it as nms')\n        if 'nms' not in cfg:\n            cfg.nms = cfg.nms_cfg\n\n        if labels.numel() > 0:\n            max_num = cfg.max_per_img\n            bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:,\n                                                             -1].contiguous(),\n                                       labels, cfg.nms)\n            if max_num > 0:\n                bboxes = bboxes[:max_num]\n                labels = labels[keep][:max_num]\n\n        return bboxes, labels\n\n    def decode_heatmap(self,\n                       tl_heat,\n                       br_heat,\n                       tl_off,\n                       br_off,\n                       tl_emb=None,\n                       br_emb=None,\n                       tl_centripetal_shift=None,\n                       br_centripetal_shift=None,\n                       img_meta=None,\n                       k=100,\n                       kernel=3,\n                       distance_threshold=0.5,\n                       num_dets=1000):\n        \"\"\"Transform outputs for a single batch item into raw bbox predictions.\n\n        Args:\n            tl_heat (Tensor): Top-left corner heatmap for current level with\n                shape (N, num_classes, H, W).\n            br_heat (Tensor): Bottom-right corner heatmap for current level\n                with shape (N, num_classes, H, W).\n            tl_off (Tensor): Top-left corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            br_off (Tensor): Bottom-right corner offset for current level with\n                shape (N, corner_offset_channels, H, W).\n            tl_emb (Tensor | None): Top-left corner embedding for current\n                level with shape (N, corner_emb_channels, H, W).\n            br_emb (Tensor | None): Bottom-right corner embedding for current\n                level with shape (N, corner_emb_channels, H, W).\n            tl_centripetal_shift (Tensor | None): Top-left centripetal shift\n                for current level with shape (N, 2, H, W).\n            br_centripetal_shift (Tensor | None): Bottom-right centripetal\n                shift for current level with shape (N, 2, H, W).\n            img_meta (dict): Meta information of current image, e.g.,\n                image size, scaling factor, etc.\n            k (int): Get top k corner keypoints from heatmap.\n            kernel (int): Max pooling kernel to extract local maximum pixels.\n            
distance_threshold (float): Distance threshold. Top-left and\n                bottom-right corner keypoints with feature distance less than\n                the threshold will be regarded as keypoints from the same\n                object.\n            num_dets (int): Number of raw boxes before doing NMS.\n\n        Returns:\n            tuple[torch.Tensor]: Decoded output of CornerHead, containing the\n            following Tensors:\n\n            - bboxes (Tensor): Coords of each box.\n            - scores (Tensor): Scores of each box.\n            - clses (Tensor): Categories of each box.\n        \"\"\"\n        with_embedding = tl_emb is not None and br_emb is not None\n        with_centripetal_shift = (\n            tl_centripetal_shift is not None\n            and br_centripetal_shift is not None)\n        assert with_embedding + with_centripetal_shift == 1\n        batch, _, height, width = tl_heat.size()\n        if torch.onnx.is_in_onnx_export():\n            inp_h, inp_w = img_meta['pad_shape_for_onnx'][:2]\n        else:\n            inp_h, inp_w, _ = img_meta['pad_shape']\n\n        # perform nms on heatmaps\n        tl_heat = get_local_maximum(tl_heat, kernel=kernel)\n        br_heat = get_local_maximum(br_heat, kernel=kernel)\n\n        tl_scores, tl_inds, tl_clses, tl_ys, tl_xs = get_topk_from_heatmap(\n            tl_heat, k=k)\n        br_scores, br_inds, br_clses, br_ys, br_xs = get_topk_from_heatmap(\n            br_heat, k=k)\n\n        # We use repeat instead of expand here because expand is a\n        # shallow-copy function, which could sometimes cause unexpected\n        # test results. Using expand decreases mAP by about 10% during\n        # testing compared to repeat.\n        tl_ys = tl_ys.view(batch, k, 1).repeat(1, 1, k)\n        tl_xs = tl_xs.view(batch, k, 1).repeat(1, 1, k)\n        br_ys = br_ys.view(batch, 1, k).repeat(1, k, 1)\n        br_xs = br_xs.view(batch, 1, k).repeat(1, k, 1)\n\n        tl_off = transpose_and_gather_feat(tl_off, tl_inds)\n        tl_off = tl_off.view(batch, k, 1, 2)\n        br_off = transpose_and_gather_feat(br_off, br_inds)\n        br_off = br_off.view(batch, 1, k, 2)\n\n        tl_xs = tl_xs + tl_off[..., 0]\n        tl_ys = tl_ys + tl_off[..., 1]\n        br_xs = br_xs + br_off[..., 0]\n        br_ys = br_ys + br_off[..., 1]\n\n        if with_centripetal_shift:\n            tl_centripetal_shift = transpose_and_gather_feat(\n                tl_centripetal_shift, tl_inds).view(batch, k, 1, 2).exp()\n            br_centripetal_shift = transpose_and_gather_feat(\n                br_centripetal_shift, br_inds).view(batch, 1, k, 2).exp()\n\n            tl_ctxs = tl_xs + tl_centripetal_shift[..., 0]\n            tl_ctys = tl_ys + tl_centripetal_shift[..., 1]\n            br_ctxs = br_xs - br_centripetal_shift[..., 0]\n            br_ctys = br_ys - br_centripetal_shift[..., 1]\n\n        # all possible boxes based on top k corners (ignoring class)\n        tl_xs *= (inp_w / width)\n        tl_ys *= (inp_h / height)\n        br_xs *= (inp_w / width)\n        br_ys *= (inp_h / height)\n\n        if with_centripetal_shift:\n            tl_ctxs *= (inp_w / width)\n            tl_ctys *= (inp_h / height)\n            br_ctxs *= (inp_w / width)\n            br_ctys *= (inp_h / height)\n\n        x_off, y_off = 0, 0  # no crop\n        if not torch.onnx.is_in_onnx_export():\n            # Since `RandomCenterCropPad` is done on CPU with numpy and is\n            # not dynamically traceable when exporting to ONNX, 'border'\n            # does not appear 
as a key in 'img_meta'. As a temporary\n            # solution, the 'border' handling is moved to the post-processing\n            # step after the model is exported to ONNX; it is handled in\n            # `mmdet/core/export/model_wrappers.py`. Although this introduces\n            # a difference between the PyTorch and exported ONNX models, it\n            # can be ignored since they achieve comparable performance (e.g.\n            # 40.4 vs 40.6 on COCO val2017 for CornerNet without test-time\n            # flip).\n            if 'border' in img_meta:\n                x_off = img_meta['border'][2]\n                y_off = img_meta['border'][0]\n\n        tl_xs -= x_off\n        tl_ys -= y_off\n        br_xs -= x_off\n        br_ys -= y_off\n\n        zeros = tl_xs.new_zeros(*tl_xs.size())\n        tl_xs = torch.where(tl_xs > 0.0, tl_xs, zeros)\n        tl_ys = torch.where(tl_ys > 0.0, tl_ys, zeros)\n        br_xs = torch.where(br_xs > 0.0, br_xs, zeros)\n        br_ys = torch.where(br_ys > 0.0, br_ys, zeros)\n\n        bboxes = torch.stack((tl_xs, tl_ys, br_xs, br_ys), dim=3)\n        area_bboxes = ((br_xs - tl_xs) * (br_ys - tl_ys)).abs()\n\n        if with_centripetal_shift:\n            tl_ctxs -= x_off\n            tl_ctys -= y_off\n            br_ctxs -= x_off\n            br_ctys -= y_off\n\n            tl_ctxs *= tl_ctxs.gt(0.0).type_as(tl_ctxs)\n            tl_ctys *= tl_ctys.gt(0.0).type_as(tl_ctys)\n            br_ctxs *= br_ctxs.gt(0.0).type_as(br_ctxs)\n            br_ctys *= br_ctys.gt(0.0).type_as(br_ctys)\n\n            ct_bboxes = torch.stack((tl_ctxs, tl_ctys, br_ctxs, br_ctys),\n                                    dim=3)\n            area_ct_bboxes = ((br_ctxs - tl_ctxs) * (br_ctys - tl_ctys)).abs()\n\n            rcentral = torch.zeros_like(ct_bboxes)\n            # magic numbers from section 4.1 of the paper\n            mu = torch.ones_like(area_bboxes) / 2.4\n            mu[area_bboxes > 3500] = 1 / 2.1  # large bboxes have smaller mu\n\n            bboxes_center_x = (bboxes[..., 0] + bboxes[..., 2]) / 2\n            bboxes_center_y = (bboxes[..., 1] + bboxes[..., 3]) / 2\n            rcentral[..., 0] = bboxes_center_x - mu * (bboxes[..., 2] -\n                                                       bboxes[..., 0]) / 2\n            rcentral[..., 1] = bboxes_center_y - mu * (bboxes[..., 3] -\n                                                       bboxes[..., 1]) / 2\n            rcentral[..., 2] = bboxes_center_x + mu * (bboxes[..., 2] -\n                                                       bboxes[..., 0]) / 2\n            rcentral[..., 3] = bboxes_center_y + mu * (bboxes[..., 3] -\n                                                       bboxes[..., 1]) / 2\n            area_rcentral = ((rcentral[..., 2] - rcentral[..., 0]) *\n                             (rcentral[..., 3] - rcentral[..., 1])).abs()\n            dists = area_ct_bboxes / area_rcentral\n\n            tl_ctx_inds = (ct_bboxes[..., 0] <= rcentral[..., 0]) | (\n                ct_bboxes[..., 0] >= rcentral[..., 2])\n            tl_cty_inds = (ct_bboxes[..., 1] <= rcentral[..., 1]) | (\n                ct_bboxes[..., 1] >= rcentral[..., 3])\n            br_ctx_inds = (ct_bboxes[..., 2] <= rcentral[..., 0]) | (\n                ct_bboxes[..., 2] >= rcentral[..., 2])\n            br_cty_inds = (ct_bboxes[..., 3] <= rcentral[..., 1]) | (\n                ct_bboxes[..., 3] >= rcentral[..., 3])\n\n        if with_embedding:\n            tl_emb = transpose_and_gather_feat(tl_emb, tl_inds)\n            tl_emb = tl_emb.view(batch, k, 1)\n            br_emb = 
transpose_and_gather_feat(br_emb, br_inds)\n            br_emb = br_emb.view(batch, 1, k)\n            dists = torch.abs(tl_emb - br_emb)\n\n        tl_scores = tl_scores.view(batch, k, 1).repeat(1, 1, k)\n        br_scores = br_scores.view(batch, 1, k).repeat(1, k, 1)\n\n        scores = (tl_scores + br_scores) / 2  # scores for all possible boxes\n\n        # tl and br should have same class\n        tl_clses = tl_clses.view(batch, k, 1).repeat(1, 1, k)\n        br_clses = br_clses.view(batch, 1, k).repeat(1, k, 1)\n        cls_inds = (tl_clses != br_clses)\n\n        # reject boxes based on distances\n        dist_inds = dists > distance_threshold\n\n        # reject boxes based on widths and heights\n        width_inds = (br_xs <= tl_xs)\n        height_inds = (br_ys <= tl_ys)\n\n        # No use `scores[cls_inds]`, instead we use `torch.where` here.\n        # Since only 1-D indices with type 'tensor(bool)' are supported\n        # when exporting to ONNX, any other bool indices with more dimensions\n        # (e.g. 2-D bool tensor) as input parameter in node is invalid\n        negative_scores = -1 * torch.ones_like(scores)\n        scores = torch.where(cls_inds, negative_scores, scores)\n        scores = torch.where(width_inds, negative_scores, scores)\n        scores = torch.where(height_inds, negative_scores, scores)\n        scores = torch.where(dist_inds, negative_scores, scores)\n\n        if with_centripetal_shift:\n            scores[tl_ctx_inds] = -1\n            scores[tl_cty_inds] = -1\n            scores[br_ctx_inds] = -1\n            scores[br_cty_inds] = -1\n\n        scores = scores.view(batch, -1)\n        scores, inds = torch.topk(scores, num_dets)\n        scores = scores.unsqueeze(2)\n\n        bboxes = bboxes.view(batch, -1, 4)\n        bboxes = gather_feat(bboxes, inds)\n\n        clses = tl_clses.contiguous().view(batch, -1, 1)\n        clses = gather_feat(clses, inds).float()\n\n        return bboxes, scores, clses\n\n    def onnx_export(self,\n                    tl_heats,\n                    br_heats,\n                    tl_embs,\n                    br_embs,\n                    tl_offs,\n                    br_offs,\n                    img_metas,\n                    rescale=False,\n                    with_nms=True):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            tl_heats (list[Tensor]): Top-left corner heatmaps for each level\n                with shape (N, num_classes, H, W).\n            br_heats (list[Tensor]): Bottom-right corner heatmaps for each\n                level with shape (N, num_classes, H, W).\n            tl_embs (list[Tensor]): Top-left corner embeddings for each level\n                with shape (N, corner_emb_channels, H, W).\n            br_embs (list[Tensor]): Bottom-right corner embeddings for each\n                level with shape (N, corner_emb_channels, H, W).\n            tl_offs (list[Tensor]): Top-left corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            br_offs (list[Tensor]): Bottom-right corner offsets for each level\n                with shape (N, corner_offset_channels, H, W).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n      
  Returns:\n            tuple[Tensor, Tensor]: First tensor bboxes with shape\n            [N, num_det, 5], 5 arrange as (x1, y1, x2, y2, score)\n            and second element is class labels of shape [N, num_det].\n        \"\"\"\n        assert tl_heats[-1].shape[0] == br_heats[-1].shape[0] == len(\n            img_metas) == 1\n        result_list = []\n        for img_id in range(len(img_metas)):\n            result_list.append(\n                self._get_bboxes_single(\n                    tl_heats[-1][img_id:img_id + 1, :],\n                    br_heats[-1][img_id:img_id + 1, :],\n                    tl_offs[-1][img_id:img_id + 1, :],\n                    br_offs[-1][img_id:img_id + 1, :],\n                    img_metas[img_id],\n                    tl_emb=tl_embs[-1][img_id:img_id + 1, :],\n                    br_emb=br_embs[-1][img_id:img_id + 1, :],\n                    rescale=rescale,\n                    with_nms=with_nms))\n\n        detections, labels = result_list[0]\n        # batch_size 1 here, [1, num_det, 5], [1, num_det]\n        return detections.unsqueeze(0), labels.unsqueeze(0)\n"
  },
  {
    "path": "mmdet/models/dense_heads/ddod_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, build_assigner, build_sampler,\n                        images_to_levels, multi_apply, reduce_mean, unmap)\nfrom mmdet.core.bbox import bbox_overlaps\nfrom ..builder import HEADS, build_loss\nfrom .anchor_head import AnchorHead\n\nEPS = 1e-12\n\n\n@HEADS.register_module()\nclass DDODHead(AnchorHead):\n    \"\"\"DDOD head decomposes conjunctions lying in most current one-stage\n    detectors via label assignment disentanglement, spatial feature\n    disentanglement, and pyramid supervision disentanglement.\n\n    https://arxiv.org/abs/2107.02963\n\n    Args:\n        num_classes (int): Number of categories excluding the\n            background category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): The number of stacked Conv. Default: 4.\n        conv_cfg (dict): Conv config of ddod head. Default: None.\n        use_dcn (bool): Use dcn, Same as ATSS when False. Default: True.\n        norm_cfg (dict): Normal config of ddod head. Default:\n            dict(type='GN', num_groups=32, requires_grad=True).\n        loss_iou (dict): Config of IoU loss. Default:\n            dict(type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0).\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 use_dcn=True,\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 loss_iou=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 **kwargs):\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.use_dcn = use_dcn\n        super(DDODHead, self).__init__(num_classes, in_channels, **kwargs)\n\n        self.sampling = False\n        if self.train_cfg:\n            self.cls_assigner = build_assigner(self.train_cfg.assigner)\n            self.reg_assigner = build_assigner(self.train_cfg.reg_assigner)\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.loss_iou = build_loss(loss_iou)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=dict(type='DCN', deform_groups=1)\n                    if i == 0 and self.use_dcn else self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=dict(type='DCN', deform_groups=1)\n               
     if i == 0 and self.use_dcn else self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.atss_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.atss_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n        self.atss_iou = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 1, 3, padding=1)\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n        # we use the global list in loss\n        self.cls_num_pos_samples_per_level = [\n            0. for _ in range(len(self.prior_generator.strides))\n        ]\n        self.reg_num_pos_samples_per_level = [\n            0. for _ in range(len(self.prior_generator.strides))\n        ]\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\"\"\"\n        for m in self.cls_convs:\n            normal_init(m.conv, std=0.01)\n        for m in self.reg_convs:\n            normal_init(m.conv, std=0.01)\n        normal_init(self.atss_reg, std=0.01)\n        normal_init(self.atss_iou, std=0.01)\n        bias_cls = bias_init_with_prob(0.01)\n        normal_init(self.atss_cls, std=0.01, bias=bias_cls)\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_base_priors * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_base_priors * 4.\n                iou_preds (list[Tensor]): IoU scores for all scale levels,\n                    each is a 4D-tensor, the channels number is\n                    num_base_priors * 1.\n        \"\"\"\n        return multi_apply(self.forward_single, feats, self.scales)\n\n    def forward_single(self, x, scale):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n                - cls_score (Tensor): Cls scores for a single scale level \\\n                    the channels number is num_base_priors * num_classes.\n                - bbox_pred (Tensor): Box energies / deltas for a single \\\n                    scale level, the channels number is num_base_priors * 4.\n                - iou_pred (Tensor): Iou for a single scale level, the \\\n                    channel number is (N, num_base_priors * 1, H, W).\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.atss_cls(cls_feat)\n        # we just follow atss, not apply exp in bbox_pred\n        bbox_pred = scale(self.atss_reg(reg_feat)).float()\n        iou_pred = self.atss_iou(reg_feat)\n        return cls_score, 
bbox_pred, iou_pred\n\n    def loss_cls_single(self, cls_score, labels, label_weights,\n                        reweight_factor, num_total_samples):\n        \"\"\"Compute cls loss of a single scale level.\n\n        Args:\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_base_priors * num_classes, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            reweight_factor (list[int]): Reweight factor for cls and reg\n                loss.\n            num_total_samples (int): Number of positive samples that is\n                reduced over all GPUs.\n\n        Returns:\n            tuple[Tensor]: A tuple of loss components.\n        \"\"\"\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=num_total_samples)\n        return reweight_factor * loss_cls,\n\n    def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels,\n                        label_weights, bbox_targets, bbox_weights,\n                        reweight_factor, num_total_samples):\n        \"\"\"Compute reg loss of a single scale level.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            bbox_pred (Tensor): Box energies / deltas for each scale\n                level with shape (N, num_base_priors * 4, H, W).\n            iou_pred (Tensor): Iou for a single scale level, the\n                channel number is (N, num_base_priors * 1, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            bbox_weights (Tensor): BBox weights of all anchors in the\n                image with shape (N, 4)\n            reweight_factor (list[int]): Reweight factor for cls and reg\n                loss.\n            num_total_samples (int): Number of positive samples that is\n                reduced over all GPUs.\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        anchors = anchors.reshape(-1, 4)\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        iou_pred = iou_pred.permute(0, 2, 3, 1).reshape(-1, )\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        bbox_weights = bbox_weights.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        iou_targets = label_weights.new_zeros(labels.shape)\n        iou_weights = label_weights.new_zeros(labels.shape)\n        iou_weights[(bbox_weights.sum(axis=1) > 0).nonzero(\n            as_tuple=False)] = 1.\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    &\n                    (labels < bg_class_ind)).nonzero(as_tuple=False).squeeze(1)\n\n        if len(pos_inds) > 0:\n            
pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_pred)\n            pos_decode_bbox_targets = self.bbox_coder.decode(\n                pos_anchors, pos_bbox_targets)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                avg_factor=num_total_samples)\n\n            iou_targets[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            loss_iou = self.loss_iou(\n                iou_pred,\n                iou_targets,\n                iou_weights,\n                avg_factor=num_total_samples)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            loss_iou = iou_pred.sum() * 0\n\n        return reweight_factor * loss_bbox, reweight_factor * loss_iou\n\n    def calc_reweight_factor(self, labels_list):\n        \"\"\"Compute reweight_factor for regression and classification loss.\"\"\"\n        # get pos samples for each level\n        bg_class_ind = self.num_classes\n        for ii, each_level_label in enumerate(labels_list):\n            pos_inds = ((each_level_label >= 0) &\n                        (each_level_label < bg_class_ind)).nonzero(\n                            as_tuple=False).squeeze(1)\n            self.cls_num_pos_samples_per_level[ii] += len(pos_inds)\n        # get reweight factor from 1 ~ 2 with bilinear interpolation\n        min_pos_samples = min(self.cls_num_pos_samples_per_level)\n        max_pos_samples = max(self.cls_num_pos_samples_per_level)\n        interval = 1. / (max_pos_samples - min_pos_samples + 1e-10)\n        reweight_factor_per_level = []\n        for pos_samples in self.cls_num_pos_samples_per_level:\n            factor = 2. 
- (pos_samples - min_pos_samples) * interval\n            reweight_factor_per_level.append(factor)\n        return reweight_factor_per_level\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             iou_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_base_priors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_base_priors * 4, H, W)\n            iou_preds (list[Tensor]): Score factor for all scale level,\n                each is a 4D-tensor, has shape (batch_size, 1, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        # calculate common vars for cls and reg assigners at once\n        targets_com = self.process_predictions_and_anchors(\n            anchor_list, valid_flag_list, cls_scores, bbox_preds, img_metas,\n            gt_bboxes_ignore)\n        (anchor_list, valid_flag_list, num_level_anchors_list, cls_score_list,\n         bbox_pred_list, gt_bboxes_ignore_list) = targets_com\n\n        # classification branch assigner\n        cls_targets = self.get_cls_targets(\n            anchor_list,\n            valid_flag_list,\n            num_level_anchors_list,\n            cls_score_list,\n            bbox_pred_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore_list,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_targets is None:\n            return None\n\n        (cls_anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = cls_targets\n\n        num_total_samples = reduce_mean(\n            torch.tensor(num_total_pos, dtype=torch.float,\n                         device=device)).item()\n        num_total_samples = max(num_total_samples, 1.0)\n\n        reweight_factor_per_level = self.calc_reweight_factor(labels_list)\n\n        cls_losses_cls, = multi_apply(\n            self.loss_cls_single,\n            cls_scores,\n            labels_list,\n            label_weights_list,\n            reweight_factor_per_level,\n            num_total_samples=num_total_samples)\n\n        # regression branch assigner\n        reg_targets = self.get_reg_targets(\n            anchor_list,\n   
         valid_flag_list,\n            num_level_anchors_list,\n            cls_score_list,\n            bbox_pred_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore_list,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if reg_targets is None:\n            return None\n\n        (reg_anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = reg_targets\n\n        num_total_samples = reduce_mean(\n            torch.tensor(num_total_pos, dtype=torch.float,\n                         device=device)).item()\n        num_total_samples = max(num_total_samples, 1.0)\n\n        reweight_factor_per_level = self.calc_reweight_factor(labels_list)\n\n        reg_losses_bbox, reg_losses_iou = multi_apply(\n            self.loss_reg_single,\n            reg_anchor_list,\n            bbox_preds,\n            iou_preds,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            reweight_factor_per_level,\n            num_total_samples=num_total_samples)\n\n        return dict(\n            loss_cls=cls_losses_cls,\n            loss_bbox=reg_losses_bbox,\n            loss_iou=reg_losses_iou)\n\n    def process_predictions_and_anchors(self, anchor_list, valid_flag_list,\n                                        cls_scores, bbox_preds, img_metas,\n                                        gt_bboxes_ignore_list):\n        \"\"\"Compute common vars for regression and classification targets.\n\n        Args:\n            anchor_list (list[Tensor]): anchors of each image.\n            valid_flag_list (list[Tensor]): Valid flags of each image.\n            cls_scores (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Return:\n            tuple[Tensor]: A tuple of common loss vars.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        anchor_list_ = []\n        valid_flag_list_ = []\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list_.append(torch.cat(anchor_list[i]))\n            valid_flag_list_.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n\n        num_levels = len(cls_scores)\n        cls_score_list = []\n        bbox_pred_list = []\n\n        mlvl_cls_score_list = [\n            cls_score.permute(0, 2, 3, 1).reshape(\n                num_imgs, -1, 
self.num_base_priors * self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        mlvl_bbox_pred_list = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.num_base_priors * 4)\n            for bbox_pred in bbox_preds\n        ]\n\n        for i in range(num_imgs):\n            mlvl_cls_tensor_list = [\n                mlvl_cls_score_list[j][i] for j in range(num_levels)\n            ]\n            mlvl_bbox_tensor_list = [\n                mlvl_bbox_pred_list[j][i] for j in range(num_levels)\n            ]\n            cat_mlvl_cls_score = torch.cat(mlvl_cls_tensor_list, dim=0)\n            cat_mlvl_bbox_pred = torch.cat(mlvl_bbox_tensor_list, dim=0)\n            cls_score_list.append(cat_mlvl_cls_score)\n            bbox_pred_list.append(cat_mlvl_bbox_pred)\n        return (anchor_list_, valid_flag_list_, num_level_anchors_list,\n                cls_score_list, bbox_pred_list, gt_bboxes_ignore_list)\n\n    def get_cls_targets(self,\n                        anchor_list,\n                        valid_flag_list,\n                        num_level_anchors_list,\n                        cls_score_list,\n                        bbox_pred_list,\n                        gt_bboxes_list,\n                        img_metas,\n                        gt_bboxes_ignore_list=None,\n                        gt_labels_list=None,\n                        label_channels=1,\n                        unmap_outputs=True):\n        \"\"\"Get cls targets for DDOD head.\n\n        This method is almost the same as `AnchorHead.get_targets()`.\n        Besides returning the targets as the parent  method does,\n        it also returns the anchors as the first element of the\n        returned tuple.\n\n        Args:\n            anchor_list (list[Tensor]): anchors of each image.\n            valid_flag_list (list[Tensor]): Valid flags of each image.\n            num_level_anchors_list (list[Tensor]): Number of anchors of each\n                scale level of all image.\n            cls_score_list (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * num_classes.\n            bbox_pred_list (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the channels number is\n                num_base_priors * 4.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore_list (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_labels_list (list[Tensor]): class indices corresponding to\n                each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Return:\n            tuple[Tensor]: A tuple of cls targets components.\n        \"\"\"\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single,\n             anchor_list,\n             valid_flag_list,\n             cls_score_list,\n             bbox_pred_list,\n             num_level_anchors_list,\n             gt_bboxes_list,\n             
gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             label_channels=label_channels,\n             unmap_outputs=unmap_outputs,\n             is_cls_assigner=True)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0])\n        labels_list = images_to_levels(all_labels, num_level_anchors_list[0])\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors_list[0])\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors_list[0])\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors_list[0])\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, num_total_pos,\n                num_total_neg)\n\n    def get_reg_targets(self,\n                        anchor_list,\n                        valid_flag_list,\n                        num_level_anchors_list,\n                        cls_score_list,\n                        bbox_pred_list,\n                        gt_bboxes_list,\n                        img_metas,\n                        gt_bboxes_ignore_list=None,\n                        gt_labels_list=None,\n                        label_channels=1,\n                        unmap_outputs=True):\n        \"\"\"Get reg targets for DDOD head.\n\n        This method is almost the same as `AnchorHead.get_targets()` when\n        is_cls_assigner is False. 
Besides returning the targets as the parent\n        method does, it also returns the anchors as the first element of the\n        returned tuple.\n\n        Args:\n            anchor_list (list[Tensor]): anchors of each image.\n            valid_flag_list (list[Tensor]): Valid flags of each image.\n            num_level_anchors_list (list[Tensor]): Number of anchors of each\n                scale level of all images.\n            cls_score_list (list[Tensor]): Classification scores for all scale\n                levels, each is a 4D-tensor, the number of channels is\n                num_base_priors * num_classes.\n            bbox_pred_list (list[Tensor]): Box energies / deltas for all scale\n                levels, each is a 4D-tensor, the number of channels is\n                num_base_priors * 4.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore_list (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_labels_list (list[Tensor]): class indices corresponding to\n                each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Return:\n            tuple[Tensor]: A tuple of reg targets components.\n        \"\"\"\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single,\n             anchor_list,\n             valid_flag_list,\n             cls_score_list,\n             bbox_pred_list,\n             num_level_anchors_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             label_channels=label_channels,\n             unmap_outputs=unmap_outputs,\n             is_cls_assigner=False)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors_list[0])\n        labels_list = images_to_levels(all_labels, num_level_anchors_list[0])\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors_list[0])\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors_list[0])\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors_list[0])\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, num_total_pos,\n                num_total_neg)\n\n    def _get_target_single(self,\n                           flat_anchors,\n                           valid_flags,\n                           cls_scores,\n                           bbox_preds,\n                           num_level_anchors,\n                           gt_bboxes,\n                           gt_bboxes_ignore,\n                           gt_labels,\n                           img_meta,\n                           label_channels=1,\n                           unmap_outputs=True,\n                           is_cls_assigner=True):\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image,\n                which are concatenated into a single tensor of shape\n                (num_base_priors, 4).\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                shape (num_base_priors,).\n            cls_scores (Tensor): Classification scores for all scale\n                levels of the image.\n            bbox_preds (Tensor): Box energies / deltas for all scale\n                levels of the image.\n            num_level_anchors (list[int]): Number of anchors of each\n                scale level.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, ).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts, ).\n            img_meta (dict): Meta info of the image.\n            label_channels (int): Channel of label. Default: 1.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors. 
Default: True.\n            is_cls_assigner (bool): Classification or regression.\n                Default: True.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                - labels (Tensor): Labels of all anchors in the image with \\\n                    shape (N, ).\n                - label_weights (Tensor): Label weights of all anchor in the \\\n                    image with shape (N, ).\n                - bbox_targets (Tensor): BBox targets of all anchors in the \\\n                    image with shape (N, 4).\n                - bbox_weights (Tensor): BBox weights of all anchors in the \\\n                    image with shape (N, 4)\n                - pos_inds (Tensor): Indices of positive anchor with shape \\\n                    (num_pos, ).\n                - neg_inds (Tensor): Indices of negative anchor with shape \\\n                    (num_neg, ).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            num_level_anchors, inside_flags)\n        bbox_preds_valid = bbox_preds[inside_flags, :]\n        cls_scores_valid = cls_scores[inside_flags, :]\n\n        assigner = self.cls_assigner if is_cls_assigner else self.reg_assigner\n\n        # decode prediction out of assigner\n        bbox_preds_valid = self.bbox_coder.decode(anchors, bbox_preds_valid)\n        assign_result = assigner.assign(anchors, num_level_anchors_inside,\n                                        gt_bboxes, gt_bboxes_ignore, gt_labels,\n                                        cls_scores_valid, bbox_preds_valid)\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if hasattr(self, 'bbox_coder'):\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                # used in VFNetHead\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class since v2.5.0\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if 
len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds)\n\n    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n        \"\"\"Get the anchors of each scale level inside.\n\n        Args:\n            num_level_anchors (list[int]): Number of anchors of each\n                scale level.\n            inside_flags (Tensor): Multi level inside flags of the image,\n                which are concatenated into a single tensor of\n                shape (num_base_priors,).\n\n        Returns:\n            list[int]: Number of anchors of each scale level inside.\n        \"\"\"\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
  {
    "path": "mmdet/models/dense_heads/deformable_detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Linear, bias_init_with_prob, constant_init\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply\nfrom mmdet.models.utils.transformer import inverse_sigmoid\nfrom ..builder import HEADS\nfrom .detr_head import DETRHead\n\n\n@HEADS.register_module()\nclass DeformableDETRHead(DETRHead):\n    \"\"\"Head of DeformDETR: Deformable DETR: Deformable Transformers for End-to-\n    End Object Detection.\n\n    Code is modified from the `official github repo\n    <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2010.04159>`_ .\n\n    Args:\n        with_box_refine (bool): Whether to refine the reference points\n            in the decoder. Defaults to False.\n        as_two_stage (bool) : Whether to generate the proposal from\n            the outputs of encoder.\n        transformer (obj:`ConfigDict`): ConfigDict is used for building\n            the Encoder and Decoder.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 with_box_refine=False,\n                 as_two_stage=False,\n                 transformer=None,\n                 **kwargs):\n        self.with_box_refine = with_box_refine\n        self.as_two_stage = as_two_stage\n        if self.as_two_stage:\n            transformer['as_two_stage'] = self.as_two_stage\n\n        super(DeformableDETRHead, self).__init__(\n            *args, transformer=transformer, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize classification branch and regression branch of head.\"\"\"\n\n        fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n        reg_branch = []\n        for _ in range(self.num_reg_fcs):\n            reg_branch.append(Linear(self.embed_dims, self.embed_dims))\n            reg_branch.append(nn.ReLU())\n        reg_branch.append(Linear(self.embed_dims, 4))\n        reg_branch = nn.Sequential(*reg_branch)\n\n        def _get_clones(module, N):\n            return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n        # last reg_branch is used to generate proposal from\n        # encode feature map when as_two_stage is True.\n        num_pred = (self.transformer.decoder.num_layers + 1) if \\\n            self.as_two_stage else self.transformer.decoder.num_layers\n\n        if self.with_box_refine:\n            self.cls_branches = _get_clones(fc_cls, num_pred)\n            self.reg_branches = _get_clones(reg_branch, num_pred)\n        else:\n\n            self.cls_branches = nn.ModuleList(\n                [fc_cls for _ in range(num_pred)])\n            self.reg_branches = nn.ModuleList(\n                [reg_branch for _ in range(num_pred)])\n\n        if not self.as_two_stage:\n            self.query_embedding = nn.Embedding(self.num_query,\n                                                self.embed_dims * 2)\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the DeformDETR head.\"\"\"\n        self.transformer.init_weights()\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            for m in self.cls_branches:\n                nn.init.constant_(m.bias, bias_init)\n        for m in self.reg_branches:\n            constant_init(m[-1], 0, bias=0)\n        nn.init.constant_(self.reg_branches[0][-1].bias.data[2:], -2.0)\n        if 
self.as_two_stage:\n            for m in self.reg_branches:\n                nn.init.constant_(m[-1].bias.data[2:], 0.0)\n\n    def forward(self, mlvl_feats, img_metas):\n        \"\"\"Forward function.\n\n        Args:\n            mlvl_feats (tuple[Tensor]): Features from the upstream\n                network, each is a 4D-tensor with shape\n                (N, C, H, W).\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            all_cls_scores (Tensor): Outputs from the classification head, \\\n                shape [nb_dec, bs, num_query, cls_out_channels]. Note \\\n                cls_out_channels should includes background.\n            all_bbox_preds (Tensor): Sigmoid outputs from the regression \\\n                head with normalized coordinate format (cx, cy, w, h). \\\n                Shape [nb_dec, bs, num_query, 4].\n            enc_outputs_class (Tensor): The score of each point on encode \\\n                feature map, has shape (N, h*w, num_class). Only when \\\n                as_two_stage is True it would be returned, otherwise \\\n                `None` would be returned.\n            enc_outputs_coord (Tensor): The proposal generate from the \\\n                encode feature map, has shape (N, h*w, 4). Only when \\\n                as_two_stage is True it would be returned, otherwise \\\n                `None` would be returned.\n        \"\"\"\n\n        batch_size = mlvl_feats[0].size(0)\n        input_img_h, input_img_w = img_metas[0]['batch_input_shape']\n        img_masks = mlvl_feats[0].new_ones(\n            (batch_size, input_img_h, input_img_w))\n        for img_id in range(batch_size):\n            img_h, img_w, _ = img_metas[img_id]['img_shape']\n            img_masks[img_id, :img_h, :img_w] = 0\n\n        mlvl_masks = []\n        mlvl_positional_encodings = []\n        for feat in mlvl_feats:\n            mlvl_masks.append(\n                F.interpolate(img_masks[None],\n                              size=feat.shape[-2:]).to(torch.bool).squeeze(0))\n            mlvl_positional_encodings.append(\n                self.positional_encoding(mlvl_masks[-1]))\n\n        query_embeds = None\n        if not self.as_two_stage:\n            query_embeds = self.query_embedding.weight\n        hs, init_reference, inter_references, \\\n            enc_outputs_class, enc_outputs_coord = self.transformer(\n                    mlvl_feats,\n                    mlvl_masks,\n                    query_embeds,\n                    mlvl_positional_encodings,\n                    reg_branches=self.reg_branches if self.with_box_refine else None,  # noqa:E501\n                    cls_branches=self.cls_branches if self.as_two_stage else None  # noqa:E501\n            )\n        hs = hs.permute(0, 2, 1, 3)\n        outputs_classes = []\n        outputs_coords = []\n\n        for lvl in range(hs.shape[0]):\n            if lvl == 0:\n                reference = init_reference\n            else:\n                reference = inter_references[lvl - 1]\n            reference = inverse_sigmoid(reference)\n            outputs_class = self.cls_branches[lvl](hs[lvl])\n            tmp = self.reg_branches[lvl](hs[lvl])\n            if reference.shape[-1] == 4:\n                tmp += reference\n            else:\n                assert reference.shape[-1] == 2\n                tmp[..., :2] += reference\n            outputs_coord = tmp.sigmoid()\n            outputs_classes.append(outputs_class)\n            outputs_coords.append(outputs_coord)\n\n        
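# Stack predictions from every decoder layer so that auxiliary\n        # losses can be computed for each layer in loss().\n        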
outputs_classes = torch.stack(outputs_classes)\n        outputs_coords = torch.stack(outputs_coords)\n        if self.as_two_stage:\n            return outputs_classes, outputs_coords, \\\n                enc_outputs_class, \\\n                enc_outputs_coord.sigmoid()\n        else:\n            return outputs_classes, outputs_coords, \\\n                None, None\n\n    @force_fp32(apply_to=('all_cls_scores', 'all_bbox_preds'))\n    def loss(self,\n             all_cls_scores,\n             all_bbox_preds,\n             enc_cls_scores,\n             enc_bbox_preds,\n             gt_bboxes_list,\n             gt_labels_list,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"\"Loss function.\n\n        Args:\n            all_cls_scores (Tensor): Classification score of all\n                decoder layers, has shape\n                [nb_dec, bs, num_query, cls_out_channels].\n            all_bbox_preds (Tensor): Sigmoid regression\n                outputs of all decode layers. Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                [nb_dec, bs, num_query, 4].\n            enc_cls_scores (Tensor): Classification scores of\n                points on encode feature map , has shape\n                (N, h*w, num_classes). Only be passed when as_two_stage is\n                True, otherwise is None.\n            enc_bbox_preds (Tensor): Regression results of each points\n                on the encode feature map, has shape (N, h*w, 4). Only be\n                passed when as_two_stage is True, otherwise is None.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image\n                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image with shape (num_gts, ).\n            img_metas (list[dict]): List of image meta information.\n            gt_bboxes_ignore (list[Tensor], optional): Bounding boxes\n                which can be ignored for each image. 
Default None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert gt_bboxes_ignore is None, \\\n            f'{self.__class__.__name__} only supports ' \\\n            f'for gt_bboxes_ignore setting to None.'\n\n        num_dec_layers = len(all_cls_scores)\n        all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]\n        all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]\n        all_gt_bboxes_ignore_list = [\n            gt_bboxes_ignore for _ in range(num_dec_layers)\n        ]\n        img_metas_list = [img_metas for _ in range(num_dec_layers)]\n\n        losses_cls, losses_bbox, losses_iou = multi_apply(\n            self.loss_single, all_cls_scores, all_bbox_preds,\n            all_gt_bboxes_list, all_gt_labels_list, img_metas_list,\n            all_gt_bboxes_ignore_list)\n\n        loss_dict = dict()\n        # loss of proposal generated from encode feature map.\n        if enc_cls_scores is not None:\n            binary_labels_list = [\n                torch.zeros_like(gt_labels_list[i])\n                for i in range(len(img_metas))\n            ]\n            enc_loss_cls, enc_losses_bbox, enc_losses_iou = \\\n                self.loss_single(enc_cls_scores, enc_bbox_preds,\n                                 gt_bboxes_list, binary_labels_list,\n                                 img_metas, gt_bboxes_ignore)\n            loss_dict['enc_loss_cls'] = enc_loss_cls\n            loss_dict['enc_loss_bbox'] = enc_losses_bbox\n            loss_dict['enc_loss_iou'] = enc_losses_iou\n\n        # loss from the last decoder layer\n        loss_dict['loss_cls'] = losses_cls[-1]\n        loss_dict['loss_bbox'] = losses_bbox[-1]\n        loss_dict['loss_iou'] = losses_iou[-1]\n        # loss from other decoder layers\n        num_dec_layer = 0\n        for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],\n                                                       losses_bbox[:-1],\n                                                       losses_iou[:-1]):\n            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i\n            loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i\n            loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i\n            num_dec_layer += 1\n        return loss_dict\n\n    @force_fp32(apply_to=('all_cls_scores', 'all_bbox_preds'))\n    def get_bboxes(self,\n                   all_cls_scores,\n                   all_bbox_preds,\n                   enc_cls_scores,\n                   enc_bbox_preds,\n                   img_metas,\n                   rescale=False):\n        \"\"\"Transform network outputs for a batch into bbox predictions.\n\n        Args:\n            all_cls_scores (Tensor): Classification score of all\n                decoder layers, has shape\n                [nb_dec, bs, num_query, cls_out_channels].\n            all_bbox_preds (Tensor): Sigmoid regression\n                outputs of all decode layers. Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                [nb_dec, bs, num_query, 4].\n            enc_cls_scores (Tensor): Classification scores of\n                points on encode feature map , has shape\n                (N, h*w, num_classes). Only be passed when as_two_stage is\n                True, otherwise is None.\n            enc_bbox_preds (Tensor): Regression results of each points\n                on the encode feature map, has shape (N, h*w, 4). 
Only be\n                passed when as_two_stage is True, otherwise is None.\n            img_metas (list[dict]): Meta information of each image.\n            rescale (bool, optional): If True, return boxes in original\n                image space. Default False.\n\n        Returns:\n            list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \\\n                The first item is an (n, 5) tensor, where the first 4 columns \\\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the \\\n                5-th column is a score between 0 and 1. The second item is a \\\n                (n,) tensor where each item is the predicted class label of \\\n                the corresponding box.\n        \"\"\"\n        cls_scores = all_cls_scores[-1]\n        bbox_preds = all_bbox_preds[-1]\n\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_score = cls_scores[img_id]\n            bbox_pred = bbox_preds[img_id]\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            proposals = self._get_bboxes_single(cls_score, bbox_pred,\n                                                img_shape, scale_factor,\n                                                rescale)\n            result_list.append(proposals)\n        return result_list\n"
  },
  {
    "path": "mmdet/models/dense_heads/dense_test_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\nfrom inspect import signature\n\nimport torch\nfrom mmcv.ops import batched_nms\n\nfrom mmdet.core import bbox_mapping_back, merge_aug_proposals\n\nif sys.version_info >= (3, 7):\n    from mmdet.utils.contextmanagers import completed\n\n\nclass BBoxTestMixin(object):\n    \"\"\"Mixin class for testing det bboxes via DenseHead.\"\"\"\n\n    def simple_test_bboxes(self, feats, img_metas, rescale=False):\n        \"\"\"Test det bboxes without test-time augmentation, can be applied in\n        DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n        etc.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n                The shape of the second tensor in the tuple is ``labels``\n                with shape (n,)\n        \"\"\"\n        outs = self.forward(feats)\n        results_list = self.get_bboxes(\n            *outs, img_metas=img_metas, rescale=rescale)\n        return results_list\n\n    def aug_test_bboxes(self, feats, img_metas, rescale=False):\n        \"\"\"Test det bboxes with test time augmentation, can be applied in\n        DenseHead except for ``RPNHead`` and its variants, e.g., ``GARPNHead``,\n        etc.\n\n        Args:\n            feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n                The shape of the second tensor in the tuple is ``labels``\n                with shape (n,). 
The length of list should always be 1.\n        \"\"\"\n        # check with_nms argument\n        gb_sig = signature(self.get_bboxes)\n        gb_args = [p.name for p in gb_sig.parameters.values()]\n        gbs_sig = signature(self._get_bboxes_single)\n        gbs_args = [p.name for p in gbs_sig.parameters.values()]\n        assert ('with_nms' in gb_args) and ('with_nms' in gbs_args), \\\n            f'{self.__class__.__name__}' \\\n            ' does not support test-time augmentation'\n\n        aug_bboxes = []\n        aug_scores = []\n        aug_labels = []\n        for x, img_meta in zip(feats, img_metas):\n            # only one image in the batch\n            outs = self.forward(x)\n            bbox_outputs = self.get_bboxes(\n                *outs,\n                img_metas=img_meta,\n                cfg=self.test_cfg,\n                rescale=False,\n                with_nms=False)[0]\n            aug_bboxes.append(bbox_outputs[0])\n            aug_scores.append(bbox_outputs[1])\n            if len(bbox_outputs) >= 3:\n                aug_labels.append(bbox_outputs[2])\n\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = self.merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas)\n        merged_labels = torch.cat(aug_labels, dim=0) if aug_labels else None\n\n        if merged_bboxes.numel() == 0:\n            det_bboxes = torch.cat([merged_bboxes, merged_scores[:, None]], -1)\n            return [\n                (det_bboxes, merged_labels),\n            ]\n\n        det_bboxes, keep_idxs = batched_nms(merged_bboxes, merged_scores,\n                                            merged_labels, self.test_cfg.nms)\n        det_bboxes = det_bboxes[:self.test_cfg.max_per_img]\n        det_labels = merged_labels[keep_idxs][:self.test_cfg.max_per_img]\n\n        if rescale:\n            _det_bboxes = det_bboxes\n        else:\n            _det_bboxes = det_bboxes.clone()\n            _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n                img_metas[0][0]['scale_factor'])\n\n        return [\n            (_det_bboxes, det_labels),\n        ]\n\n    def simple_test_rpn(self, x, img_metas):\n        \"\"\"Test without augmentation, only for ``RPNHead`` and its variants,\n        e.g., ``GARPNHead``, etc.\n\n        Args:\n            x (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            img_metas (list[dict]): Meta info of each image.\n\n        Returns:\n            list[Tensor]: Proposals of each image, each item has shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n        \"\"\"\n        rpn_outs = self(x)\n        proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas)\n        return proposal_list\n\n    def aug_test_rpn(self, feats, img_metas):\n        \"\"\"Test with augmentation for only for ``RPNHead`` and its variants,\n        e.g., ``GARPNHead``, etc.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                        a 4D-tensor.\n            img_metas (list[dict]): Meta info of each image.\n\n        Returns:\n            list[Tensor]: Proposals of each image, each item has shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n        \"\"\"\n        samples_per_gpu = len(img_metas[0])\n        aug_proposals = [[] for _ in range(samples_per_gpu)]\n        for x, img_meta in zip(feats, img_metas):\n            
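# 'x' holds the features of one test-time augmentation for the\n            # whole batch; proposals of every augmentation are gathered\n            # per image below.\n            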
proposal_list = self.simple_test_rpn(x, img_meta)\n            for i, proposals in enumerate(proposal_list):\n                aug_proposals[i].append(proposals)\n        # reorganize the order of 'img_metas' to match the dimensions\n        # of 'aug_proposals'\n        aug_img_metas = []\n        for i in range(samples_per_gpu):\n            aug_img_meta = []\n            for j in range(len(img_metas)):\n                aug_img_meta.append(img_metas[j][i])\n            aug_img_metas.append(aug_img_meta)\n        # after merging, proposals will be rescaled to the original image size\n        merged_proposals = [\n            merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)\n            for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)\n        ]\n        return merged_proposals\n\n    if sys.version_info >= (3, 7):\n\n        async def async_simple_test_rpn(self, x, img_metas):\n            sleep_interval = self.test_cfg.pop('async_sleep_interval', 0.025)\n            async with completed(\n                    __name__, 'rpn_head_forward',\n                    sleep_interval=sleep_interval):\n                rpn_outs = self(x)\n\n            proposal_list = self.get_bboxes(*rpn_outs, img_metas=img_metas)\n            return proposal_list\n\n    def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):\n        \"\"\"Merge augmented detection bboxes and scores.\n\n        Args:\n            aug_bboxes (list[Tensor]): shape (n, 4*#class)\n            aug_scores (list[Tensor] or None): shape (n, #class)\n            img_metas (list[list[dict]]): Meta information of each augmented\n                image, e.g., image size, scaling factor, flip, etc.\n\n        Returns:\n            tuple[Tensor]: ``bboxes`` with shape (n,4), where\n            4 represent (tl_x, tl_y, br_x, br_y)\n            and ``scores`` with shape (n,).\n        \"\"\"\n        recovered_bboxes = []\n        for bboxes, img_info in zip(aug_bboxes, img_metas):\n            img_shape = img_info[0]['img_shape']\n            scale_factor = img_info[0]['scale_factor']\n            flip = img_info[0]['flip']\n            flip_direction = img_info[0]['flip_direction']\n            bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip,\n                                       flip_direction)\n            recovered_bboxes.append(bboxes)\n        bboxes = torch.cat(recovered_bboxes, dim=0)\n        if aug_scores is None:\n            return bboxes\n        else:\n            scores = torch.cat(aug_scores, dim=0)\n            return bboxes, scores\n"
  },
  {
    "path": "mmdet/models/dense_heads/detr_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, Linear, build_activation_layer\nfrom mmcv.cnn.bricks.transformer import FFN, build_positional_encoding\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh,\n                        build_assigner, build_sampler, multi_apply,\n                        reduce_mean)\nfrom mmdet.models.utils import build_transformer\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\n\n@HEADS.register_module()\nclass DETRHead(AnchorFreeHead):\n    \"\"\"Implements the DETR transformer head.\n\n    See `paper: End-to-End Object Detection with Transformers\n    <https://arxiv.org/pdf/2005.12872>`_ for details.\n\n    Args:\n        num_classes (int): Number of categories excluding the background.\n        in_channels (int): Number of channels in the input feature map.\n        num_query (int): Number of query in Transformer.\n        num_reg_fcs (int, optional): Number of fully-connected layers used in\n            `FFN`, which is then used for the regression head. Default 2.\n        transformer (obj:`mmcv.ConfigDict`|dict): Config for transformer.\n            Default: None.\n        sync_cls_avg_factor (bool): Whether to sync the avg_factor of\n            all ranks. Default to False.\n        positional_encoding (obj:`mmcv.ConfigDict`|dict):\n            Config for position encoding.\n        loss_cls (obj:`mmcv.ConfigDict`|dict): Config of the\n            classification loss. Default `CrossEntropyLoss`.\n        loss_bbox (obj:`mmcv.ConfigDict`|dict): Config of the\n            regression loss. Default `L1Loss`.\n        loss_iou (obj:`mmcv.ConfigDict`|dict): Config of the\n            regression iou loss. 
Default `GIoULoss`.\n        train_cfg (obj:`mmcv.ConfigDict`|dict): Training config of\n            transformer head.\n        test_cfg (obj:`mmcv.ConfigDict`|dict): Testing config of\n            transformer head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    _version = 2\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 num_query=100,\n                 num_reg_fcs=2,\n                 transformer=None,\n                 sync_cls_avg_factor=False,\n                 positional_encoding=dict(\n                     type='SinePositionalEncoding',\n                     num_feats=128,\n                     normalize=True),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     bg_cls_weight=0.1,\n                     use_sigmoid=False,\n                     loss_weight=1.0,\n                     class_weight=1.0),\n                 loss_bbox=dict(type='L1Loss', loss_weight=5.0),\n                 loss_iou=dict(type='GIoULoss', loss_weight=2.0),\n                 train_cfg=dict(\n                     assigner=dict(\n                         type='HungarianAssigner',\n                         cls_cost=dict(type='ClassificationCost', weight=1.),\n                         reg_cost=dict(type='BBoxL1Cost', weight=5.0),\n                         iou_cost=dict(\n                             type='IoUCost', iou_mode='giou', weight=2.0))),\n                 test_cfg=dict(max_per_img=100),\n                 init_cfg=None,\n                 **kwargs):\n        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,\n        # since it brings inconvenience when the initialization of\n        # `AnchorFreeHead` is called.\n        super(AnchorFreeHead, self).__init__(init_cfg)\n        self.bg_cls_weight = 0\n        self.sync_cls_avg_factor = sync_cls_avg_factor\n        class_weight = loss_cls.get('class_weight', None)\n        if class_weight is not None and (self.__class__ is DETRHead):\n            assert isinstance(class_weight, float), 'Expected ' \\\n                'class_weight to have type float. Found ' \\\n                f'{type(class_weight)}.'\n            # NOTE following the official DETR repo, bg_cls_weight means\n            # relative classification weight of the no-object class.\n            bg_cls_weight = loss_cls.get('bg_cls_weight', class_weight)\n            assert isinstance(bg_cls_weight, float), 'Expected ' \\\n                'bg_cls_weight to have type float. 
Found ' \\\n                f'{type(bg_cls_weight)}.'\n            class_weight = torch.ones(num_classes + 1) * class_weight\n            # set background class as the last index\n            class_weight[num_classes] = bg_cls_weight\n            loss_cls.update({'class_weight': class_weight})\n            if 'bg_cls_weight' in loss_cls:\n                loss_cls.pop('bg_cls_weight')\n            self.bg_cls_weight = bg_cls_weight\n\n        if train_cfg:\n            assert 'assigner' in train_cfg, 'assigner should be provided '\\\n                'when train_cfg is set.'\n            assigner = train_cfg['assigner']\n            assert loss_cls['loss_weight'] == assigner['cls_cost']['weight'], \\\n                'The classification weight for loss and matcher should be ' \\\n                'exactly the same.'\n            assert loss_bbox['loss_weight'] == assigner['reg_cost'][\n                'weight'], 'The regression L1 weight for loss and matcher ' \\\n                'should be exactly the same.'\n            assert loss_iou['loss_weight'] == assigner['iou_cost']['weight'], \\\n                'The regression iou weight for loss and matcher should be ' \\\n                'exactly the same.'\n            self.assigner = build_assigner(assigner)\n            # DETR sampling=False, so use PseudoSampler\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.num_query = num_query\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.num_reg_fcs = num_reg_fcs\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self.fp16_enabled = False\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n        self.loss_iou = build_loss(loss_iou)\n\n        if self.loss_cls.use_sigmoid:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n        self.act_cfg = transformer.get('act_cfg',\n                                       dict(type='ReLU', inplace=True))\n        self.activate = build_activation_layer(self.act_cfg)\n        self.positional_encoding = build_positional_encoding(\n            positional_encoding)\n        self.transformer = build_transformer(transformer)\n        self.embed_dims = self.transformer.embed_dims\n        assert 'num_feats' in positional_encoding\n        num_feats = positional_encoding['num_feats']\n        assert num_feats * 2 == self.embed_dims, 'embed_dims should' \\\n            f' be exactly 2 times of num_feats. 
Found {self.embed_dims}' \\\n            f' and {num_feats}.'\n        self._init_layers()\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the transformer head.\"\"\"\n        self.input_proj = Conv2d(\n            self.in_channels, self.embed_dims, kernel_size=1)\n        self.fc_cls = Linear(self.embed_dims, self.cls_out_channels)\n        self.reg_ffn = FFN(\n            self.embed_dims,\n            self.embed_dims,\n            self.num_reg_fcs,\n            self.act_cfg,\n            dropout=0.0,\n            add_residual=False)\n        self.fc_reg = Linear(self.embed_dims, 4)\n        self.query_embedding = nn.Embedding(self.num_query, self.embed_dims)\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the transformer head.\"\"\"\n        # The initialization for transformer is important\n        self.transformer.init_weights()\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        \"\"\"Load checkpoints.\"\"\"\n        # NOTE here use `AnchorFreeHead` instead of `TransformerHead`,\n        # since `AnchorFreeHead._load_from_state_dict` should not be\n        # called here. Invoking the default `Module._load_from_state_dict`\n        # is enough.\n\n        # The names of some parameters have been changed.\n        version = local_metadata.get('version', None)\n        if (version is None or version < 2) and self.__class__ is DETRHead:\n            convert_dict = {\n                '.self_attn.': '.attentions.0.',\n                '.ffn.': '.ffns.0.',\n                '.multihead_attn.': '.attentions.1.',\n                '.decoder.norm.': '.decoder.post_norm.'\n            }\n            state_dict_keys = list(state_dict.keys())\n            for k in state_dict_keys:\n                for ori_key, convert_key in convert_dict.items():\n                    if ori_key in k:\n                        convert_key = k.replace(ori_key, convert_key)\n                        state_dict[convert_key] = state_dict[k]\n                        del state_dict[k]\n\n        super(AnchorFreeHead,\n              self)._load_from_state_dict(state_dict, prefix, local_metadata,\n                                          strict, missing_keys,\n                                          unexpected_keys, error_msgs)\n\n    def forward(self, feats, img_metas):\n        \"\"\"Forward function.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.\n\n                - all_cls_scores_list (list[Tensor]): Classification scores \\\n                    for each scale level. Each is a 4D-tensor with shape \\\n                    [nb_dec, bs, num_query, cls_out_channels]. Note \\\n                    `cls_out_channels` should include background.\n                - all_bbox_preds_list (list[Tensor]): Sigmoid regression \\\n                    outputs for each scale level. 
Each is a 4D-tensor with \\\n                    normalized coordinate format (cx, cy, w, h) and shape \\\n                    [nb_dec, bs, num_query, 4].\n        \"\"\"\n        num_levels = len(feats)\n        img_metas_list = [img_metas for _ in range(num_levels)]\n        return multi_apply(self.forward_single, feats, img_metas_list)\n\n    def forward_single(self, x, img_metas):\n        \"\"\"\"Forward function for a single feature level.\n\n        Args:\n            x (Tensor): Input feature from backbone's single stage, shape\n                [bs, c, h, w].\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            all_cls_scores (Tensor): Outputs from the classification head,\n                shape [nb_dec, bs, num_query, cls_out_channels]. Note\n                cls_out_channels should includes background.\n            all_bbox_preds (Tensor): Sigmoid outputs from the regression\n                head with normalized coordinate format (cx, cy, w, h).\n                Shape [nb_dec, bs, num_query, 4].\n        \"\"\"\n        # construct binary masks which used for the transformer.\n        # NOTE following the official DETR repo, non-zero values representing\n        # ignored positions, while zero values means valid positions.\n        batch_size = x.size(0)\n        input_img_h, input_img_w = img_metas[0]['batch_input_shape']\n        masks = x.new_ones((batch_size, input_img_h, input_img_w))\n        for img_id in range(batch_size):\n            img_h, img_w, _ = img_metas[img_id]['img_shape']\n            masks[img_id, :img_h, :img_w] = 0\n\n        x = self.input_proj(x)\n        # interpolate masks to have the same spatial shape with x\n        masks = F.interpolate(\n            masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)\n        # position encoding\n        pos_embed = self.positional_encoding(masks)  # [bs, embed_dim, h, w]\n        # outs_dec: [nb_dec, bs, num_query, embed_dim]\n        outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight,\n                                       pos_embed)\n\n        all_cls_scores = self.fc_cls(outs_dec)\n        all_bbox_preds = self.fc_reg(self.activate(\n            self.reg_ffn(outs_dec))).sigmoid()\n        return all_cls_scores, all_bbox_preds\n\n    @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))\n    def loss(self,\n             all_cls_scores_list,\n             all_bbox_preds_list,\n             gt_bboxes_list,\n             gt_labels_list,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"\"Loss function.\n\n        Only outputs from the last feature level are used for computing\n        losses by default.\n\n        Args:\n            all_cls_scores_list (list[Tensor]): Classification outputs\n                for each feature level. Each is a 4D-tensor with shape\n                [nb_dec, bs, num_query, cls_out_channels].\n            all_bbox_preds_list (list[Tensor]): Sigmoid regression\n                outputs for each feature level. 
Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                [nb_dec, bs, num_query, 4].\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image\n                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image with shape (num_gts, ).\n            img_metas (list[dict]): List of image meta information.\n            gt_bboxes_ignore (list[Tensor], optional): Bounding boxes\n                which can be ignored for each image. Default None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        # NOTE defaultly only the outputs from the last feature scale is used.\n        all_cls_scores = all_cls_scores_list[-1]\n        all_bbox_preds = all_bbox_preds_list[-1]\n        assert gt_bboxes_ignore is None, \\\n            'Only supports for gt_bboxes_ignore setting to None.'\n\n        num_dec_layers = len(all_cls_scores)\n        all_gt_bboxes_list = [gt_bboxes_list for _ in range(num_dec_layers)]\n        all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]\n        all_gt_bboxes_ignore_list = [\n            gt_bboxes_ignore for _ in range(num_dec_layers)\n        ]\n        img_metas_list = [img_metas for _ in range(num_dec_layers)]\n\n        losses_cls, losses_bbox, losses_iou = multi_apply(\n            self.loss_single, all_cls_scores, all_bbox_preds,\n            all_gt_bboxes_list, all_gt_labels_list, img_metas_list,\n            all_gt_bboxes_ignore_list)\n\n        loss_dict = dict()\n        # loss from the last decoder layer\n        loss_dict['loss_cls'] = losses_cls[-1]\n        loss_dict['loss_bbox'] = losses_bbox[-1]\n        loss_dict['loss_iou'] = losses_iou[-1]\n        # loss from other decoder layers\n        num_dec_layer = 0\n        for loss_cls_i, loss_bbox_i, loss_iou_i in zip(losses_cls[:-1],\n                                                       losses_bbox[:-1],\n                                                       losses_iou[:-1]):\n            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i\n            loss_dict[f'd{num_dec_layer}.loss_bbox'] = loss_bbox_i\n            loss_dict[f'd{num_dec_layer}.loss_iou'] = loss_iou_i\n            num_dec_layer += 1\n        return loss_dict\n\n    def loss_single(self,\n                    cls_scores,\n                    bbox_preds,\n                    gt_bboxes_list,\n                    gt_labels_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None):\n        \"\"\"\"Loss function for outputs from a single decoder layer of a single\n        feature level.\n\n        Args:\n            cls_scores (Tensor): Box score logits from a single decoder layer\n                for all images. 
Shape [bs, num_query, cls_out_channels].\n            bbox_preds (Tensor): Sigmoid outputs from a single decoder layer\n                for all images, with normalized coordinate (cx, cy, w, h) and\n                shape [bs, num_query, 4].\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image\n                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image with shape (num_gts, ).\n            img_metas (list[dict]): List of image meta information.\n            gt_bboxes_ignore_list (list[Tensor], optional): Bounding\n                boxes which can be ignored for each image. Default None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components for outputs from\n                a single decoder layer.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        bbox_preds_list = [bbox_preds[i] for i in range(num_imgs)]\n        cls_reg_targets = self.get_targets(cls_scores_list, bbox_preds_list,\n                                           gt_bboxes_list, gt_labels_list,\n                                           img_metas, gt_bboxes_ignore_list)\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        labels = torch.cat(labels_list, 0)\n        label_weights = torch.cat(label_weights_list, 0)\n        bbox_targets = torch.cat(bbox_targets_list, 0)\n        bbox_weights = torch.cat(bbox_weights_list, 0)\n\n        # classification loss\n        cls_scores = cls_scores.reshape(-1, self.cls_out_channels)\n        # construct weighted avg_factor to match with the official DETR repo\n        cls_avg_factor = num_total_pos * 1.0 + \\\n            num_total_neg * self.bg_cls_weight\n        if self.sync_cls_avg_factor:\n            cls_avg_factor = reduce_mean(\n                cls_scores.new_tensor([cls_avg_factor]))\n        cls_avg_factor = max(cls_avg_factor, 1)\n\n        loss_cls = self.loss_cls(\n            cls_scores, labels, label_weights, avg_factor=cls_avg_factor)\n\n        # Compute the average number of gt boxes across all gpus, for\n        # normalization purposes\n        num_total_pos = loss_cls.new_tensor([num_total_pos])\n        num_total_pos = torch.clamp(reduce_mean(num_total_pos), min=1).item()\n\n        # construct factors used for rescale bboxes\n        factors = []\n        for img_meta, bbox_pred in zip(img_metas, bbox_preds):\n            img_h, img_w, _ = img_meta['img_shape']\n            factor = bbox_pred.new_tensor([img_w, img_h, img_w,\n                                           img_h]).unsqueeze(0).repeat(\n                                               bbox_pred.size(0), 1)\n            factors.append(factor)\n        factors = torch.cat(factors, 0)\n\n        # DETR regress the relative position of boxes (cxcywh) in the image,\n        # thus the learning target is normalized by the image size. 
So here\n        # we need to re-scale them for calculating IoU loss\n        bbox_preds = bbox_preds.reshape(-1, 4)\n        bboxes = bbox_cxcywh_to_xyxy(bbox_preds) * factors\n        bboxes_gt = bbox_cxcywh_to_xyxy(bbox_targets) * factors\n\n        # regression IoU loss, defaultly GIoU loss\n        loss_iou = self.loss_iou(\n            bboxes, bboxes_gt, bbox_weights, avg_factor=num_total_pos)\n\n        # regression L1 loss\n        loss_bbox = self.loss_bbox(\n            bbox_preds, bbox_targets, bbox_weights, avg_factor=num_total_pos)\n        return loss_cls, loss_bbox, loss_iou\n\n    def get_targets(self,\n                    cls_scores_list,\n                    bbox_preds_list,\n                    gt_bboxes_list,\n                    gt_labels_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None):\n        \"\"\"\"Compute regression and classification targets for a batch image.\n\n        Outputs from a single decoder layer of a single feature level are used.\n\n        Args:\n            cls_scores_list (list[Tensor]): Box score logits from a single\n                decoder layer for each image with shape [num_query,\n                cls_out_channels].\n            bbox_preds_list (list[Tensor]): Sigmoid outputs from a single\n                decoder layer for each image, with normalized coordinate\n                (cx, cy, w, h) and shape [num_query, 4].\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes for each image\n                with shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image with shape (num_gts, ).\n            img_metas (list[dict]): List of image meta information.\n            gt_bboxes_ignore_list (list[Tensor], optional): Bounding\n                boxes which can be ignored for each image. 
Default None.\n\n        Returns:\n            tuple: a tuple containing the following targets.\n\n                - labels_list (list[Tensor]): Labels for all images.\n                - label_weights_list (list[Tensor]): Label weights for all \\\n                    images.\n                - bbox_targets_list (list[Tensor]): BBox targets for all \\\n                    images.\n                - bbox_weights_list (list[Tensor]): BBox weights for all \\\n                    images.\n                - num_total_pos (int): Number of positive samples in all \\\n                    images.\n                - num_total_neg (int): Number of negative samples in all \\\n                    images.\n        \"\"\"\n        assert gt_bboxes_ignore_list is None, \\\n            'Only supports for gt_bboxes_ignore setting to None.'\n        num_imgs = len(cls_scores_list)\n        gt_bboxes_ignore_list = [\n            gt_bboxes_ignore_list for _ in range(num_imgs)\n        ]\n\n        (labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single, cls_scores_list, bbox_preds_list,\n             gt_bboxes_list, gt_labels_list, img_metas, gt_bboxes_ignore_list)\n        num_total_pos = sum((inds.numel() for inds in pos_inds_list))\n        num_total_neg = sum((inds.numel() for inds in neg_inds_list))\n        return (labels_list, label_weights_list, bbox_targets_list,\n                bbox_weights_list, num_total_pos, num_total_neg)\n\n    def _get_target_single(self,\n                           cls_score,\n                           bbox_pred,\n                           gt_bboxes,\n                           gt_labels,\n                           img_meta,\n                           gt_bboxes_ignore=None):\n        \"\"\"\"Compute regression and classification targets for one image.\n\n        Outputs from a single decoder layer of a single feature level are used.\n\n        Args:\n            cls_score (Tensor): Box score logits from a single decoder layer\n                for one image. Shape [num_query, cls_out_channels].\n            bbox_pred (Tensor): Sigmoid outputs from a single decoder layer\n                for one image, with normalized coordinate (cx, cy, w, h) and\n                shape [num_query, 4].\n            gt_bboxes (Tensor): Ground truth bboxes for one image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (Tensor): Ground truth class indices for one image\n                with shape (num_gts, ).\n            img_meta (dict): Meta information for one image.\n            gt_bboxes_ignore (Tensor, optional): Bounding boxes\n                which can be ignored. 
Default None.\n\n        Returns:\n            tuple[Tensor]: a tuple containing the following for one image.\n\n                - labels (Tensor): Labels of each image.\n                - label_weights (Tensor]): Label weights of each image.\n                - bbox_targets (Tensor): BBox targets of each image.\n                - bbox_weights (Tensor): BBox weights of each image.\n                - pos_inds (Tensor): Sampled positive indices for each image.\n                - neg_inds (Tensor): Sampled negative indices for each image.\n        \"\"\"\n\n        num_bboxes = bbox_pred.size(0)\n        # assigner and sampler\n        assign_result = self.assigner.assign(bbox_pred, cls_score, gt_bboxes,\n                                             gt_labels, img_meta,\n                                             gt_bboxes_ignore)\n        sampling_result = self.sampler.sample(assign_result, bbox_pred,\n                                              gt_bboxes)\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        # label targets\n        labels = gt_bboxes.new_full((num_bboxes, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n        label_weights = gt_bboxes.new_ones(num_bboxes)\n\n        # bbox targets\n        bbox_targets = torch.zeros_like(bbox_pred)\n        bbox_weights = torch.zeros_like(bbox_pred)\n        bbox_weights[pos_inds] = 1.0\n        img_h, img_w, _ = img_meta['img_shape']\n\n        # DETR regress the relative position of boxes (cxcywh) in the image.\n        # Thus the learning target should be normalized by the image size, also\n        # the box format should be converted from defaultly x1y1x2y2 to cxcywh.\n        factor = bbox_pred.new_tensor([img_w, img_h, img_w,\n                                       img_h]).unsqueeze(0)\n        pos_gt_bboxes_normalized = sampling_result.pos_gt_bboxes / factor\n        pos_gt_bboxes_targets = bbox_xyxy_to_cxcywh(pos_gt_bboxes_normalized)\n        bbox_targets[pos_inds] = pos_gt_bboxes_targets\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds)\n\n    # over-write because img_metas are needed as inputs for bbox_head.\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels=None,\n                      gt_bboxes_ignore=None,\n                      proposal_cfg=None,\n                      **kwargs):\n        \"\"\"Forward function for training mode.\n\n        Args:\n            x (list[Tensor]): Features from backbone.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            proposal_cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert proposal_cfg is None, '\"proposal_cfg\" must be None'\n        outs = self(x, img_metas)\n        
if gt_labels is None:\n            loss_inputs = outs + (gt_bboxes, img_metas)\n        else:\n            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)\n        losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n        return losses\n\n    @force_fp32(apply_to=('all_cls_scores_list', 'all_bbox_preds_list'))\n    def get_bboxes(self,\n                   all_cls_scores_list,\n                   all_bbox_preds_list,\n                   img_metas,\n                   rescale=False):\n        \"\"\"Transform network outputs for a batch into bbox predictions.\n\n        Args:\n            all_cls_scores_list (list[Tensor]): Classification outputs\n                for each feature level. Each is a 4D-tensor with shape\n                [nb_dec, bs, num_query, cls_out_channels].\n            all_bbox_preds_list (list[Tensor]): Sigmoid regression\n                outputs for each feature level. Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                [nb_dec, bs, num_query, 4].\n            img_metas (list[dict]): Meta information of each image.\n            rescale (bool, optional): If True, return boxes in original\n                image space. Default False.\n\n        Returns:\n            list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple. \\\n                The first item is an (n, 5) tensor, where the first 4 columns \\\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the \\\n                5-th column is a score between 0 and 1. The second item is a \\\n                (n,) tensor where each item is the predicted class label of \\\n                the corresponding box.\n        \"\"\"\n        # NOTE defaultly only using outputs from the last feature level,\n        # and only the outputs from the last decoder layer is used.\n        cls_scores = all_cls_scores_list[-1][-1]\n        bbox_preds = all_bbox_preds_list[-1][-1]\n\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_score = cls_scores[img_id]\n            bbox_pred = bbox_preds[img_id]\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            proposals = self._get_bboxes_single(cls_score, bbox_pred,\n                                                img_shape, scale_factor,\n                                                rescale)\n            result_list.append(proposals)\n\n        return result_list\n\n    def _get_bboxes_single(self,\n                           cls_score,\n                           bbox_pred,\n                           img_shape,\n                           scale_factor,\n                           rescale=False):\n        \"\"\"Transform outputs from the last decoder layer into bbox predictions\n        for each image.\n\n        Args:\n            cls_score (Tensor): Box score logits from the last decoder layer\n                for each image. 
Shape [num_query, cls_out_channels].\n            bbox_pred (Tensor): Sigmoid outputs from the last decoder layer\n                for each image, with coordinate format (cx, cy, w, h) and\n                shape [num_query, 4].\n            img_shape (tuple[int]): Shape of input image, (height, width, 3).\n            scale_factor (ndarray, optional): Scale factor of the image arange\n                as (w_scale, h_scale, w_scale, h_scale).\n            rescale (bool, optional): If True, return boxes in original image\n                space. Default False.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels.\n\n                - det_bboxes: Predicted bboxes with shape [num_query, 5], \\\n                    where the first 4 columns are bounding box positions \\\n                    (tl_x, tl_y, br_x, br_y) and the 5-th column are scores \\\n                    between 0 and 1.\n                - det_labels: Predicted labels of the corresponding box with \\\n                    shape [num_query].\n        \"\"\"\n        assert len(cls_score) == len(bbox_pred)\n        max_per_img = self.test_cfg.get('max_per_img', self.num_query)\n        # exclude background\n        if self.loss_cls.use_sigmoid:\n            cls_score = cls_score.sigmoid()\n            scores, indexes = cls_score.view(-1).topk(max_per_img)\n            det_labels = indexes % self.num_classes\n            bbox_index = indexes // self.num_classes\n            bbox_pred = bbox_pred[bbox_index]\n        else:\n            scores, det_labels = F.softmax(cls_score, dim=-1)[..., :-1].max(-1)\n            scores, bbox_index = scores.topk(max_per_img)\n            bbox_pred = bbox_pred[bbox_index]\n            det_labels = det_labels[bbox_index]\n\n        det_bboxes = bbox_cxcywh_to_xyxy(bbox_pred)\n        det_bboxes[:, 0::2] = det_bboxes[:, 0::2] * img_shape[1]\n        det_bboxes[:, 1::2] = det_bboxes[:, 1::2] * img_shape[0]\n        det_bboxes[:, 0::2].clamp_(min=0, max=img_shape[1])\n        det_bboxes[:, 1::2].clamp_(min=0, max=img_shape[0])\n        if rescale:\n            det_bboxes /= det_bboxes.new_tensor(scale_factor)\n        det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)\n\n        return det_bboxes, det_labels\n\n    def simple_test_bboxes(self, feats, img_metas, rescale=False):\n        \"\"\"Test det bboxes without test-time augmentation.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is ``bboxes`` with shape (n, 5),\n                where 5 represent (tl_x, tl_y, br_x, br_y, score).\n                The shape of the second tensor in the tuple is ``labels``\n                with shape (n,)\n        \"\"\"\n        # forward of this head requires img_metas\n        outs = self.forward(feats, img_metas)\n        results_list = self.get_bboxes(*outs, img_metas, rescale=rescale)\n        return results_list\n\n    def forward_onnx(self, feats, img_metas):\n        \"\"\"Forward function for exporting to ONNX.\n\n        Over-write `forward` because: `masks` is directly created with\n        zero (valid position tag) and has the same spatial size as `x`.\n        Thus the 
construction of `masks` is different from that in `forward`.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple[list[Tensor], list[Tensor]]: Outputs for all scale levels.\n\n                - all_cls_scores_list (list[Tensor]): Classification scores \\\n                    for each scale level. Each is a 4D-tensor with shape \\\n                    [nb_dec, bs, num_query, cls_out_channels]. Note \\\n                    `cls_out_channels` should includes background.\n                - all_bbox_preds_list (list[Tensor]): Sigmoid regression \\\n                    outputs for each scale level. Each is a 4D-tensor with \\\n                    normalized coordinate format (cx, cy, w, h) and shape \\\n                    [nb_dec, bs, num_query, 4].\n        \"\"\"\n        num_levels = len(feats)\n        img_metas_list = [img_metas for _ in range(num_levels)]\n        return multi_apply(self.forward_single_onnx, feats, img_metas_list)\n\n    def forward_single_onnx(self, x, img_metas):\n        \"\"\"\"Forward function for a single feature level with ONNX exportation.\n\n        Args:\n            x (Tensor): Input feature from backbone's single stage, shape\n                [bs, c, h, w].\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            all_cls_scores (Tensor): Outputs from the classification head,\n                shape [nb_dec, bs, num_query, cls_out_channels]. Note\n                cls_out_channels should includes background.\n            all_bbox_preds (Tensor): Sigmoid outputs from the regression\n                head with normalized coordinate format (cx, cy, w, h).\n                Shape [nb_dec, bs, num_query, 4].\n        \"\"\"\n        # Note `img_shape` is not dynamically traceable to ONNX,\n        # since the related augmentation was done with numpy under\n        # CPU. Thus `masks` is directly created with zeros (valid tag)\n        # and the same spatial shape as `x`.\n        # The difference between torch and exported ONNX model may be\n        # ignored, since the same performance is achieved (e.g.\n        # 40.1 vs 40.1 for DETR)\n        batch_size = x.size(0)\n        h, w = x.size()[-2:]\n        masks = x.new_zeros((batch_size, h, w))  # [B,h,w]\n\n        x = self.input_proj(x)\n        # interpolate masks to have the same spatial shape with x\n        masks = F.interpolate(\n            masks.unsqueeze(1), size=x.shape[-2:]).to(torch.bool).squeeze(1)\n        pos_embed = self.positional_encoding(masks)\n        outs_dec, _ = self.transformer(x, masks, self.query_embedding.weight,\n                                       pos_embed)\n\n        all_cls_scores = self.fc_cls(outs_dec)\n        all_bbox_preds = self.fc_reg(self.activate(\n            self.reg_ffn(outs_dec))).sigmoid()\n        return all_cls_scores, all_bbox_preds\n\n    def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_metas):\n        \"\"\"Transform network outputs into bbox predictions, with ONNX\n        exportation.\n\n        Args:\n            all_cls_scores_list (list[Tensor]): Classification outputs\n                for each feature level. Each is a 4D-tensor with shape\n                [nb_dec, bs, num_query, cls_out_channels].\n            all_bbox_preds_list (list[Tensor]): Sigmoid regression\n                outputs for each feature level. 
Each is a 4D-tensor with\n                normalized coordinate format (cx, cy, w, h) and shape\n                [nb_dec, bs, num_query, 4].\n            img_metas (list[dict]): Meta information of each image.\n\n        Returns:\n            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n                and class labels of shape [N, num_det].\n        \"\"\"\n        assert len(img_metas) == 1, \\\n            'Only support one input image while in exporting to ONNX'\n\n        cls_scores = all_cls_scores_list[-1][-1]\n        bbox_preds = all_bbox_preds_list[-1][-1]\n\n        # Note `img_shape` is not dynamically traceable to ONNX,\n        # here `img_shape_for_onnx` (padded shape of image tensor)\n        # is used.\n        img_shape = img_metas[0]['img_shape_for_onnx']\n        max_per_img = self.test_cfg.get('max_per_img', self.num_query)\n        batch_size = cls_scores.size(0)\n        # `batch_index_offset` is used for the gather of concatenated tensor\n        batch_index_offset = torch.arange(batch_size).to(\n            cls_scores.device) * max_per_img\n        batch_index_offset = batch_index_offset.unsqueeze(1).expand(\n            batch_size, max_per_img)\n\n        # supports dynamical batch inference\n        if self.loss_cls.use_sigmoid:\n            cls_scores = cls_scores.sigmoid()\n            scores, indexes = cls_scores.view(batch_size, -1).topk(\n                max_per_img, dim=1)\n            det_labels = indexes % self.num_classes\n            bbox_index = indexes // self.num_classes\n            bbox_index = (bbox_index + batch_index_offset).view(-1)\n            bbox_preds = bbox_preds.view(-1, 4)[bbox_index]\n            bbox_preds = bbox_preds.view(batch_size, -1, 4)\n        else:\n            scores, det_labels = F.softmax(\n                cls_scores, dim=-1)[..., :-1].max(-1)\n            scores, bbox_index = scores.topk(max_per_img, dim=1)\n            bbox_index = (bbox_index + batch_index_offset).view(-1)\n            bbox_preds = bbox_preds.view(-1, 4)[bbox_index]\n            det_labels = det_labels.view(-1)[bbox_index]\n            bbox_preds = bbox_preds.view(batch_size, -1, 4)\n            det_labels = det_labels.view(batch_size, -1)\n\n        det_bboxes = bbox_cxcywh_to_xyxy(bbox_preds)\n        # use `img_shape_tensor` for dynamically exporting to ONNX\n        img_shape_tensor = img_shape.flip(0).repeat(2)  # [w,h,w,h]\n        img_shape_tensor = img_shape_tensor.unsqueeze(0).unsqueeze(0).expand(\n            batch_size, det_bboxes.size(1), 4)\n        det_bboxes = det_bboxes * img_shape_tensor\n        # dynamically clip bboxes\n        x1, y1, x2, y2 = det_bboxes.split((1, 1, 1, 1), dim=-1)\n        from mmdet.core.export import dynamic_clip_for_onnx\n        x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, img_shape)\n        det_bboxes = torch.cat([x1, y1, x2, y2], dim=-1)\n        det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(-1)), -1)\n\n        return det_bboxes, det_labels\n"
  },
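The DETR head above keeps its predictions as normalized (cx, cy, w, h) boxes and only converts them to absolute (tl_x, tl_y, br_x, br_y) coordinates in `_get_bboxes_single`. Below is a minimal, self-contained sketch of that post-processing step with made-up tensors; `cxcywh_to_xyxy` is a local stand-in for `bbox_cxcywh_to_xyxy`, and the image size, scale factor and scores are invented for illustration.

import torch


def cxcywh_to_xyxy(bbox):
    # local stand-in for mmdet.core.bbox_cxcywh_to_xyxy
    cx, cy, w, h = bbox.unbind(-1)
    return torch.stack(
        (cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h), dim=-1)


# three sigmoid outputs (normalized cx, cy, w, h) for a 600x800 (h, w) input image
bbox_pred = torch.tensor([[0.50, 0.50, 0.20, 0.40],
                          [0.10, 0.10, 0.30, 0.10],
                          [0.90, 0.90, 0.40, 0.40]])
scores = torch.tensor([0.9, 0.6, 0.3])
img_h, img_w = 600, 800
scale_factor = (1.6, 1.5, 1.6, 1.5)  # (w_scale, h_scale, w_scale, h_scale), made up

det_bboxes = cxcywh_to_xyxy(bbox_pred)
det_bboxes[:, 0::2] *= img_w                       # x coordinates to input-image pixels
det_bboxes[:, 1::2] *= img_h                       # y coordinates to input-image pixels
det_bboxes[:, 0::2].clamp_(min=0, max=img_w)       # keep boxes inside the input image
det_bboxes[:, 1::2].clamp_(min=0, max=img_h)
det_bboxes /= det_bboxes.new_tensor(scale_factor)  # rescale=True: back to the original image
det_bboxes = torch.cat((det_bboxes, scores.unsqueeze(1)), -1)  # (num_query, 5)
print(det_bboxes)

The same scaling appears in reverse in `_get_target_single`, where ground-truth boxes are divided by [img_w, img_h, img_w, img_h] so that targets and predictions live in the same normalized space.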
  {
    "path": "mmdet/models/dense_heads/embedding_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS\nfrom ...core import bbox_cxcywh_to_xyxy\n\n\n@HEADS.register_module()\nclass EmbeddingRPNHead(BaseModule):\n    \"\"\"RPNHead in the `Sparse R-CNN <https://arxiv.org/abs/2011.12450>`_ .\n\n    Unlike traditional RPNHead, this module does not need FPN input, but just\n    decode `init_proposal_bboxes` and expand the first dimension of\n    `init_proposal_bboxes` and `init_proposal_features` to the batch_size.\n\n    Args:\n        num_proposals (int): Number of init_proposals. Default 100.\n        proposal_feature_channel (int): Channel number of\n            init_proposal_feature. Defaults to 256.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_proposals=100,\n                 proposal_feature_channel=256,\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(EmbeddingRPNHead, self).__init__(init_cfg)\n        self.num_proposals = num_proposals\n        self.proposal_feature_channel = proposal_feature_channel\n        self._init_layers()\n\n    def _init_layers(self):\n        \"\"\"Initialize a sparse set of proposal boxes and proposal features.\"\"\"\n        self.init_proposal_bboxes = nn.Embedding(self.num_proposals, 4)\n        self.init_proposal_features = nn.Embedding(\n            self.num_proposals, self.proposal_feature_channel)\n\n    def init_weights(self):\n        \"\"\"Initialize the init_proposal_bboxes as normalized.\n\n        [c_x, c_y, w, h], and we initialize it to the size of  the entire\n        image.\n        \"\"\"\n        super(EmbeddingRPNHead, self).init_weights()\n        nn.init.constant_(self.init_proposal_bboxes.weight[:, :2], 0.5)\n        nn.init.constant_(self.init_proposal_bboxes.weight[:, 2:], 1)\n\n    def _decode_init_proposals(self, imgs, img_metas):\n        \"\"\"Decode init_proposal_bboxes according to the size of images and\n        expand dimension of init_proposal_features to batch_size.\n\n        Args:\n            imgs (list[Tensor]): List of FPN features.\n            img_metas (list[dict]): List of meta-information of\n                images. 
Need the img_shape to decode the init_proposals.\n\n        Returns:\n            Tuple(Tensor):\n\n                - proposals (Tensor): Decoded proposal bboxes,\n                  has shape (batch_size, num_proposals, 4).\n                - init_proposal_features (Tensor): Expanded proposal\n                  features, has shape\n                  (batch_size, num_proposals, proposal_feature_channel).\n                - imgs_whwh (Tensor): Tensor with shape\n                  (batch_size, 4), the dimension means\n                  [img_width, img_height, img_width, img_height].\n        \"\"\"\n        proposals = self.init_proposal_bboxes.weight.clone()\n        proposals = bbox_cxcywh_to_xyxy(proposals)\n        num_imgs = len(imgs[0])\n        imgs_whwh = []\n        for meta in img_metas:\n            h, w, _ = meta['img_shape']\n            imgs_whwh.append(imgs[0].new_tensor([[w, h, w, h]]))\n        imgs_whwh = torch.cat(imgs_whwh, dim=0)\n        imgs_whwh = imgs_whwh[:, None, :]\n\n        # imgs_whwh has shape (batch_size, 1, 4)\n        # The shape of proposals change from (num_proposals, 4)\n        # to (batch_size ,num_proposals, 4)\n        proposals = proposals * imgs_whwh\n\n        init_proposal_features = self.init_proposal_features.weight.clone()\n        init_proposal_features = init_proposal_features[None].expand(\n            num_imgs, *init_proposal_features.size())\n        return proposals, init_proposal_features, imgs_whwh\n\n    def forward_dummy(self, img, img_metas):\n        \"\"\"Dummy forward function.\n\n        Used in flops calculation.\n        \"\"\"\n        return self._decode_init_proposals(img, img_metas)\n\n    def forward_train(self, img, img_metas):\n        \"\"\"Forward function in training stage.\"\"\"\n        return self._decode_init_proposals(img, img_metas)\n\n    def simple_test_rpn(self, img, img_metas):\n        \"\"\"Forward function in testing stage.\"\"\"\n        return self._decode_init_proposals(img, img_metas)\n\n    def simple_test(self, img, img_metas):\n        \"\"\"Forward function in testing stage.\"\"\"\n        raise NotImplementedError\n\n    def aug_test_rpn(self, feats, img_metas):\n        raise NotImplementedError(\n            'EmbeddingRPNHead does not support test-time augmentation')\n"
  },
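A minimal sketch of what `_decode_init_proposals` above does: the learnable, normalized (cx, cy, w, h) proposal embeddings are converted to corner format and multiplied by each image's [img_width, img_height, img_width, img_height], so every image in the batch starts from the same proposal set expressed in its own pixel coordinates. The sizes and image shapes below are made up, and `cxcywh_to_xyxy` again re-implements `bbox_cxcywh_to_xyxy` locally so the snippet stands alone.

import torch
import torch.nn as nn


def cxcywh_to_xyxy(bbox):
    # local stand-in for mmdet.core.bbox_cxcywh_to_xyxy
    cx, cy, w, h = bbox.unbind(-1)
    return torch.stack(
        (cx - 0.5 * w, cy - 0.5 * h, cx + 0.5 * w, cy + 0.5 * h), dim=-1)


num_proposals = 4                                      # toy value; the head defaults to 100
init_proposal_bboxes = nn.Embedding(num_proposals, 4)
nn.init.constant_(init_proposal_bboxes.weight[:, :2], 0.5)  # centers at the image center
nn.init.constant_(init_proposal_bboxes.weight[:, 2:], 1.0)  # width/height span the whole image

# two images with different (h, w) in the batch
img_shapes = [(480, 640), (600, 800)]
imgs_whwh = torch.tensor([[w, h, w, h] for h, w in img_shapes],
                         dtype=torch.float)[:, None, :]      # (batch_size, 1, 4)

with torch.no_grad():
    proposals = cxcywh_to_xyxy(init_proposal_bboxes.weight)  # (num_proposals, 4), normalized
    proposals = proposals[None] * imgs_whwh                  # (batch_size, num_proposals, 4)
print(proposals.shape)  # torch.Size([2, 4, 4]); each proposal initially covers its full image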
  {
    "path": "mmdet/models/dense_heads/fcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Scale\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply, reduce_mean\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1e8\n\n\n@HEADS.register_module()\nclass FCOSHead(AnchorFreeHead):\n    \"\"\"Anchor-free head used in `FCOS <https://arxiv.org/abs/1904.01355>`_.\n\n    The FCOS head does not use anchor boxes. Instead bounding boxes are\n    predicted at each pixel and a centerness measure is used to suppress\n    low-quality predictions.\n    Here norm_on_bbox, centerness_on_reg, dcn_on_last_conv are training\n    tricks used in official repo, which will bring remarkable mAP gains\n    of up to 4.9. Please see https://github.com/tianzhi0549/FCOS for\n    more detail.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        strides (list[int] | list[tuple[int, int]]): Strides of points\n            in multiple feature levels. Default: (4, 8, 16, 32, 64).\n        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling. Default: False.\n        center_sample_radius (float): Radius of center sampling. Default: 1.5.\n        norm_on_bbox (bool): If true, normalize the regression targets\n            with FPN strides. Default: False.\n        centerness_on_reg (bool): If true, position centerness on the\n            regress branch. Please refer to https://github.com/tianzhi0549/FCOS/issues/89#issuecomment-516877042.\n            Default: False.\n        conv_bias (bool | str): If specified as `auto`, it will be decided by the\n            norm_cfg. Bias of conv will be set as True if `norm_cfg` is None, otherwise\n            False. 
Default: \"auto\".\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        loss_centerness (dict): Config of centerness loss.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: norm_cfg=dict(type='GN', num_groups=32, requires_grad=True).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n\n    Example:\n        >>> self = FCOSHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred, centerness = self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"  # noqa: E501\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n                                 (512, INF)),\n                 center_sampling=False,\n                 center_sample_radius=1.5,\n                 norm_on_bbox=False,\n                 centerness_on_reg=False,\n                 loss_cls=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox=dict(type='IoULoss', loss_weight=1.0),\n                 loss_centerness=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.regress_ranges = regress_ranges\n        self.center_sampling = center_sampling\n        self.center_sample_radius = center_sample_radius\n        self.norm_on_bbox = norm_on_bbox\n        self.centerness_on_reg = centerness_on_reg\n        super().__init__(\n            num_classes,\n            in_channels,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            norm_cfg=norm_cfg,\n            init_cfg=init_cfg,\n            **kwargs)\n        self.loss_centerness = build_loss(loss_centerness)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        super()._init_layers()\n        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple:\n                cls_scores (list[Tensor]): Box scores for each scale level, \\\n                    each is a 4D-tensor, the channel number is \\\n                    num_points * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for each \\\n                    scale level, each is a 4D-tensor, the channel number is \\\n                    num_points * 4.\n                centernesses (list[Tensor]): centerness for each scale level, \\\n                    each is a 
4D-tensor, the channel number is num_points * 1.\n        \"\"\"\n        return multi_apply(self.forward_single, feats, self.scales,\n                           self.strides)\n\n    def forward_single(self, x, scale, stride):\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            stride (int): The corresponding stride for feature maps, only\n                used to normalize the bbox prediction when self.norm_on_bbox\n                is True.\n\n        Returns:\n            tuple: scores for each class, bbox predictions and centerness \\\n                predictions of input feature maps.\n        \"\"\"\n        cls_score, bbox_pred, cls_feat, reg_feat = super().forward_single(x)\n        if self.centerness_on_reg:\n            centerness = self.conv_centerness(reg_feat)\n        else:\n            centerness = self.conv_centerness(cls_feat)\n        # scale the bbox_pred of different level\n        # float to avoid overflow when enabling FP16\n        bbox_pred = scale(bbox_pred).float()\n        if self.norm_on_bbox:\n            # bbox_pred needed for gradient computation has been modified\n            # by F.relu(bbox_pred) when run with PyTorch 1.10. So replace\n            # F.relu(bbox_pred) with bbox_pred.clamp(min=0)\n            bbox_pred = bbox_pred.clamp(min=0)\n            if not self.training:\n                bbox_pred *= stride\n        else:\n            bbox_pred = bbox_pred.exp()\n        return cls_score, bbox_pred, centerness\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'centernesses'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             centernesses,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            centernesses (list[Tensor]): centerness for each scale level, each\n                is a 4D-tensor, the channel number is num_points * 1.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(centernesses)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        labels, bbox_targets = self.get_targets(all_level_points, gt_bboxes,\n                          
                      gt_labels)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and centerness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_centerness = [\n            centerness.permute(0, 2, 3, 1).reshape(-1)\n            for centerness in centernesses\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_centerness = torch.cat(flatten_centerness)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < bg_class_ind)).nonzero().reshape(-1)\n        num_pos = torch.tensor(\n            len(pos_inds), dtype=torch.float, device=bbox_preds[0].device)\n        num_pos = max(reduce_mean(num_pos), 1.0)\n        loss_cls = self.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_centerness = flatten_centerness[pos_inds]\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_centerness_targets = self.centerness_target(pos_bbox_targets)\n        # centerness weighted iou loss\n        centerness_denorm = max(\n            reduce_mean(pos_centerness_targets.sum().detach()), 1e-6)\n\n        if len(pos_inds) > 0:\n            pos_points = flatten_points[pos_inds]\n            pos_decoded_bbox_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_preds)\n            pos_decoded_target_preds = self.bbox_coder.decode(\n                pos_points, pos_bbox_targets)\n            loss_bbox = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds,\n                weight=pos_centerness_targets,\n                avg_factor=centerness_denorm)\n            loss_centerness = self.loss_centerness(\n                pos_centerness, pos_centerness_targets, avg_factor=num_pos)\n        else:\n            loss_bbox = pos_bbox_preds.sum()\n            loss_centerness = pos_centerness.sum()\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            loss_centerness=loss_centerness)\n\n    def get_targets(self, points, gt_bboxes_list, gt_labels_list):\n        \"\"\"Compute regression, classification and centerness targets for points\n        in multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n            gt_labels_list (list[Tensor]): Ground truth labels of each box,\n                each has shape (num_gt,).\n\n        Returns:\n            tuple:\n                concat_lvl_labels (list[Tensor]): Labels of each level. 
\\\n                concat_lvl_bbox_targets (list[Tensor]): BBox targets of each \\\n                    level.\n        \"\"\"\n        assert len(points) == len(self.regress_ranges)\n        num_levels = len(points)\n        # expand regress ranges to align with points\n        expanded_regress_ranges = [\n            points[i].new_tensor(self.regress_ranges[i])[None].expand_as(\n                points[i]) for i in range(num_levels)\n        ]\n        # concat all levels points and regress ranges\n        concat_regress_ranges = torch.cat(expanded_regress_ranges, dim=0)\n        concat_points = torch.cat(points, dim=0)\n\n        # the number of points per img, per lvl\n        num_points = [center.size(0) for center in points]\n\n        # get labels and bbox_targets of each image\n        labels_list, bbox_targets_list = multi_apply(\n            self._get_target_single,\n            gt_bboxes_list,\n            gt_labels_list,\n            points=concat_points,\n            regress_ranges=concat_regress_ranges,\n            num_points_per_lvl=num_points)\n\n        # split to per img, per level\n        labels_list = [labels.split(num_points, 0) for labels in labels_list]\n        bbox_targets_list = [\n            bbox_targets.split(num_points, 0)\n            for bbox_targets in bbox_targets_list\n        ]\n\n        # concat per level image\n        concat_lvl_labels = []\n        concat_lvl_bbox_targets = []\n        for i in range(num_levels):\n            concat_lvl_labels.append(\n                torch.cat([labels[i] for labels in labels_list]))\n            bbox_targets = torch.cat(\n                [bbox_targets[i] for bbox_targets in bbox_targets_list])\n            if self.norm_on_bbox:\n                bbox_targets = bbox_targets / self.strides[i]\n            concat_lvl_bbox_targets.append(bbox_targets)\n        return concat_lvl_labels, concat_lvl_bbox_targets\n\n    def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ranges,\n                           num_points_per_lvl):\n        \"\"\"Compute regression and classification targets for a single image.\"\"\"\n        num_points = points.size(0)\n        num_gts = gt_labels.size(0)\n        if num_gts == 0:\n            return gt_labels.new_full((num_points,), self.num_classes), \\\n                   gt_bboxes.new_zeros((num_points, 4))\n\n        areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (\n            gt_bboxes[:, 3] - gt_bboxes[:, 1])\n        # TODO: figure out why these two are different\n        # areas = areas[None].expand(num_points, num_gts)\n        areas = areas[None].repeat(num_points, 1)\n        regress_ranges = regress_ranges[:, None, :].expand(\n            num_points, num_gts, 2)\n        gt_bboxes = gt_bboxes[None].expand(num_points, num_gts, 4)\n        xs, ys = points[:, 0], points[:, 1]\n        xs = xs[:, None].expand(num_points, num_gts)\n        ys = ys[:, None].expand(num_points, num_gts)\n\n        left = xs - gt_bboxes[..., 0]\n        right = gt_bboxes[..., 2] - xs\n        top = ys - gt_bboxes[..., 1]\n        bottom = gt_bboxes[..., 3] - ys\n        bbox_targets = torch.stack((left, top, right, bottom), -1)\n\n        if self.center_sampling:\n            # condition1: inside a `center bbox`\n            radius = self.center_sample_radius\n            center_xs = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) / 2\n            center_ys = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) / 2\n            center_gts = torch.zeros_like(gt_bboxes)\n            stride = 
center_xs.new_zeros(center_xs.shape)\n\n            # project the points on current lvl back to the `original` sizes\n            lvl_begin = 0\n            for lvl_idx, num_points_lvl in enumerate(num_points_per_lvl):\n                lvl_end = lvl_begin + num_points_lvl\n                stride[lvl_begin:lvl_end] = self.strides[lvl_idx] * radius\n                lvl_begin = lvl_end\n\n            x_mins = center_xs - stride\n            y_mins = center_ys - stride\n            x_maxs = center_xs + stride\n            y_maxs = center_ys + stride\n            center_gts[..., 0] = torch.where(x_mins > gt_bboxes[..., 0],\n                                             x_mins, gt_bboxes[..., 0])\n            center_gts[..., 1] = torch.where(y_mins > gt_bboxes[..., 1],\n                                             y_mins, gt_bboxes[..., 1])\n            center_gts[..., 2] = torch.where(x_maxs > gt_bboxes[..., 2],\n                                             gt_bboxes[..., 2], x_maxs)\n            center_gts[..., 3] = torch.where(y_maxs > gt_bboxes[..., 3],\n                                             gt_bboxes[..., 3], y_maxs)\n\n            cb_dist_left = xs - center_gts[..., 0]\n            cb_dist_right = center_gts[..., 2] - xs\n            cb_dist_top = ys - center_gts[..., 1]\n            cb_dist_bottom = center_gts[..., 3] - ys\n            center_bbox = torch.stack(\n                (cb_dist_left, cb_dist_top, cb_dist_right, cb_dist_bottom), -1)\n            inside_gt_bbox_mask = center_bbox.min(-1)[0] > 0\n        else:\n            # condition1: inside a gt bbox\n            inside_gt_bbox_mask = bbox_targets.min(-1)[0] > 0\n\n        # condition2: limit the regression range for each location\n        max_regress_distance = bbox_targets.max(-1)[0]\n        inside_regress_range = (\n            (max_regress_distance >= regress_ranges[..., 0])\n            & (max_regress_distance <= regress_ranges[..., 1]))\n\n        # if there are still more than one objects for a location,\n        # we choose the one with minimal area\n        areas[inside_gt_bbox_mask == 0] = INF\n        areas[inside_regress_range == 0] = INF\n        min_area, min_area_inds = areas.min(dim=1)\n\n        labels = gt_labels[min_area_inds]\n        labels[min_area == INF] = self.num_classes  # set as BG\n        bbox_targets = bbox_targets[range(num_points), min_area_inds]\n\n        return labels, bbox_targets\n\n    def centerness_target(self, pos_bbox_targets):\n        \"\"\"Compute centerness targets.\n\n        Args:\n            pos_bbox_targets (Tensor): BBox targets of positive bboxes in shape\n                (num_pos, 4)\n\n        Returns:\n            Tensor: Centerness target.\n        \"\"\"\n        # only calculate pos centerness targets, otherwise there may be nan\n        left_right = pos_bbox_targets[:, [0, 2]]\n        top_bottom = pos_bbox_targets[:, [1, 3]]\n        if len(left_right) == 0:\n            centerness_targets = left_right[..., 0]\n        else:\n            centerness_targets = (\n                left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * (\n                    top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0])\n        return torch.sqrt(centerness_targets)\n\n    def _get_points_single(self,\n                           featmap_size,\n                           stride,\n                           dtype,\n                           device,\n                           flatten=False):\n        \"\"\"Get points according to feature map size.\n\n        This function 
will be deprecated soon.\n        \"\"\"\n        warnings.warn(\n            '`_get_points_single` in `FCOSHead` will be '\n            'deprecated soon, we support a multi level point generator now, '\n            'you can get points of a single level feature map '\n            'with `self.prior_generator.single_level_grid_priors` ')\n\n        y, x = super()._get_points_single(featmap_size, stride, dtype, device)\n        points = torch.stack((x.reshape(-1) * stride, y.reshape(-1) * stride),\n                             dim=-1) + stride // 2\n        return points\n"
  },
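The centerness target computed by `FCOSHead.centerness_target` above is sqrt((min(l, r) / max(l, r)) * (min(t, b) / max(t, b))), taken over the (left, top, right, bottom) regression targets of positive points. A small worked example with hand-picked distances shows how the target decays from 1 at the box center towards 0 near its border:

import torch

# (left, top, right, bottom) regression targets for three positive points
pos_bbox_targets = torch.tensor([[10., 10., 10., 10.],   # point at the exact box center
                                 [ 2., 10., 18., 10.],   # off-center horizontally
                                 [ 1.,  1., 19., 19.]])  # point close to a corner

left_right = pos_bbox_targets[:, [0, 2]]
top_bottom = pos_bbox_targets[:, [1, 3]]
centerness_targets = torch.sqrt(
    (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) *
    (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]))
print(centerness_targets)  # tensor([1.0000, 0.3333, 0.0526])

Because these targets also weight the IoU loss in `loss`, off-center locations contribute less to box regression, which is what suppresses the low-quality predictions mentioned in the class docstring.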
  {
    "path": "mmdet/models/dense_heads/fovea_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.core import multi_apply\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom ..builder import HEADS\nfrom .anchor_free_head import AnchorFreeHead\n\nINF = 1e8\n\n\nclass FeatureAlign(BaseModule):\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=3,\n                 deform_groups=4,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.1,\n                     override=dict(\n                         type='Normal', name='conv_adaption', std=0.01))):\n        super(FeatureAlign, self).__init__(init_cfg)\n        offset_channels = kernel_size * kernel_size * 2\n        self.conv_offset = nn.Conv2d(\n            4, deform_groups * offset_channels, 1, bias=False)\n        self.conv_adaption = DeformConv2d(\n            in_channels,\n            out_channels,\n            kernel_size=kernel_size,\n            padding=(kernel_size - 1) // 2,\n            deform_groups=deform_groups)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x, shape):\n        offset = self.conv_offset(shape)\n        x = self.relu(self.conv_adaption(x, offset))\n        return x\n\n\n@HEADS.register_module()\nclass FoveaHead(AnchorFreeHead):\n    \"\"\"FoveaBox: Beyond Anchor-based Object Detector\n    https://arxiv.org/abs/1904.03797\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 base_edge_list=(16, 32, 64, 128, 256),\n                 scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128,\n                                                                         512)),\n                 sigma=0.4,\n                 with_deform=False,\n                 deform_groups=4,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.base_edge_list = base_edge_list\n        self.scale_ranges = scale_ranges\n        self.sigma = sigma\n        self.with_deform = with_deform\n        self.deform_groups = deform_groups\n        super().__init__(num_classes, in_channels, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        # box branch\n        super()._init_reg_convs()\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n\n        # cls branch\n        if not self.with_deform:\n            super()._init_cls_convs()\n            self.conv_cls = nn.Conv2d(\n                self.feat_channels, self.cls_out_channels, 3, padding=1)\n        else:\n            self.cls_convs = nn.ModuleList()\n            self.cls_convs.append(\n                ConvModule(\n                    self.feat_channels, (self.feat_channels * 4),\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n            self.cls_convs.append(\n                
ConvModule((self.feat_channels * 4), (self.feat_channels * 4),\n                           1,\n                           stride=1,\n                           padding=0,\n                           conv_cfg=self.conv_cfg,\n                           norm_cfg=self.norm_cfg,\n                           bias=self.norm_cfg is None))\n            self.feature_adaption = FeatureAlign(\n                self.feat_channels,\n                self.feat_channels,\n                kernel_size=3,\n                deform_groups=self.deform_groups)\n            self.conv_cls = nn.Conv2d(\n                int(self.feat_channels * 4),\n                self.cls_out_channels,\n                3,\n                padding=1)\n\n    def forward_single(self, x):\n        cls_feat = x\n        reg_feat = x\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n        bbox_pred = self.conv_reg(reg_feat)\n        if self.with_deform:\n            cls_feat = self.feature_adaption(cls_feat, bbox_pred.exp())\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n        cls_score = self.conv_cls(cls_feat)\n        return cls_score, bbox_pred\n\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bbox_list,\n             gt_label_list,\n             img_metas,\n             gt_bboxes_ignore=None):\n        assert len(cls_scores) == len(bbox_preds)\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        points = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=bbox_preds[0].dtype,\n            device=bbox_preds[0].device)\n        num_imgs = cls_scores[0].size(0)\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(-1, self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_labels, flatten_bbox_targets = self.get_targets(\n            gt_bbox_list, gt_label_list, featmap_sizes, points)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((flatten_labels >= 0)\n                    & (flatten_labels < self.num_classes)).nonzero().view(-1)\n        num_pos = len(pos_inds)\n\n        loss_cls = self.loss_cls(\n            flatten_cls_scores, flatten_labels, avg_factor=num_pos + num_imgs)\n        if num_pos > 0:\n            pos_bbox_preds = flatten_bbox_preds[pos_inds]\n            pos_bbox_targets = flatten_bbox_targets[pos_inds]\n            pos_weights = pos_bbox_targets.new_zeros(\n                pos_bbox_targets.size()) + 1.0\n            loss_bbox = self.loss_bbox(\n                pos_bbox_preds,\n                pos_bbox_targets,\n                pos_weights,\n                avg_factor=num_pos)\n        else:\n            loss_bbox = torch.tensor(\n                0,\n                dtype=flatten_bbox_preds.dtype,\n                device=flatten_bbox_preds.device)\n        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)\n\n    def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, points):\n        label_list, bbox_target_list = multi_apply(\n            self._get_target_single,\n            gt_bbox_list,\n            gt_label_list,\n            featmap_size_list=featmap_sizes,\n    
        point_list=points)\n        flatten_labels = [\n            torch.cat([\n                labels_level_img.flatten() for labels_level_img in labels_level\n            ]) for labels_level in zip(*label_list)\n        ]\n        flatten_bbox_targets = [\n            torch.cat([\n                bbox_targets_level_img.reshape(-1, 4)\n                for bbox_targets_level_img in bbox_targets_level\n            ]) for bbox_targets_level in zip(*bbox_target_list)\n        ]\n        flatten_labels = torch.cat(flatten_labels)\n        flatten_bbox_targets = torch.cat(flatten_bbox_targets)\n        return flatten_labels, flatten_bbox_targets\n\n    def _get_target_single(self,\n                           gt_bboxes_raw,\n                           gt_labels_raw,\n                           featmap_size_list=None,\n                           point_list=None):\n\n        gt_areas = torch.sqrt((gt_bboxes_raw[:, 2] - gt_bboxes_raw[:, 0]) *\n                              (gt_bboxes_raw[:, 3] - gt_bboxes_raw[:, 1]))\n        label_list = []\n        bbox_target_list = []\n        # for each pyramid, find the cls and box target\n        for base_len, (lower_bound, upper_bound), stride, featmap_size, \\\n            points in zip(self.base_edge_list, self.scale_ranges,\n                          self.strides, featmap_size_list, point_list):\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            points = points.view(*featmap_size, 2)\n            x, y = points[..., 0], points[..., 1]\n            labels = gt_labels_raw.new_zeros(featmap_size) + self.num_classes\n            bbox_targets = gt_bboxes_raw.new(featmap_size[0], featmap_size[1],\n                                             4) + 1\n            # scale assignment\n            hit_indices = ((gt_areas >= lower_bound) &\n                           (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(hit_indices) == 0:\n                label_list.append(labels)\n                bbox_target_list.append(torch.log(bbox_targets))\n                continue\n            _, hit_index_order = torch.sort(-gt_areas[hit_indices])\n            hit_indices = hit_indices[hit_index_order]\n            gt_bboxes = gt_bboxes_raw[hit_indices, :] / stride\n            gt_labels = gt_labels_raw[hit_indices]\n            half_w = 0.5 * (gt_bboxes[:, 2] - gt_bboxes[:, 0])\n            half_h = 0.5 * (gt_bboxes[:, 3] - gt_bboxes[:, 1])\n            # valid fovea area: left, right, top, down\n            pos_left = torch.ceil(\n                gt_bboxes[:, 0] + (1 - self.sigma) * half_w - 0.5).long(). \\\n                clamp(0, featmap_size[1] - 1)\n            pos_right = torch.floor(\n                gt_bboxes[:, 0] + (1 + self.sigma) * half_w - 0.5).long(). \\\n                clamp(0, featmap_size[1] - 1)\n            pos_top = torch.ceil(\n                gt_bboxes[:, 1] + (1 - self.sigma) * half_h - 0.5).long(). \\\n                clamp(0, featmap_size[0] - 1)\n            pos_down = torch.floor(\n                gt_bboxes[:, 1] + (1 + self.sigma) * half_h - 0.5).long(). 
\\\n                clamp(0, featmap_size[0] - 1)\n            for px1, py1, px2, py2, label, (gt_x1, gt_y1, gt_x2, gt_y2) in \\\n                    zip(pos_left, pos_top, pos_right, pos_down, gt_labels,\n                        gt_bboxes_raw[hit_indices, :]):\n                labels[py1:py2 + 1, px1:px2 + 1] = label\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 0] = \\\n                    (x[py1:py2 + 1, px1:px2 + 1] - gt_x1) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 1] = \\\n                    (y[py1:py2 + 1, px1:px2 + 1] - gt_y1) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 2] = \\\n                    (gt_x2 - x[py1:py2 + 1, px1:px2 + 1]) / base_len\n                bbox_targets[py1:py2 + 1, px1:px2 + 1, 3] = \\\n                    (gt_y2 - y[py1:py2 + 1, px1:px2 + 1]) / base_len\n            bbox_targets = bbox_targets.clamp(min=1. / 16, max=16.)\n            label_list.append(labels)\n            bbox_target_list.append(torch.log(bbox_targets))\n        return label_list, bbox_target_list\n\n    # Same as base_dense_head/_get_bboxes_single except self._bbox_decode\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. Fovea head does not need this value.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 2).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. 
If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_score_list) == len(bbox_pred_list)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, stride, base_len, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list, self.strides,\n                              self.base_edge_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self._bbox_decode(priors, bbox_pred, base_len, img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,\n                                       img_meta['scale_factor'], cfg, rescale,\n                                       with_nms)\n\n    def _bbox_decode(self, priors, bbox_pred, base_len, max_shape):\n        bbox_pred = bbox_pred.exp()\n\n        y = priors[:, 1]\n        x = priors[:, 0]\n        x1 = (x - base_len * bbox_pred[:, 0]). \\\n            clamp(min=0, max=max_shape[1] - 1)\n        y1 = (y - base_len * bbox_pred[:, 1]). \\\n            clamp(min=0, max=max_shape[0] - 1)\n        x2 = (x + base_len * bbox_pred[:, 2]). \\\n            clamp(min=0, max=max_shape[1] - 1)\n        y2 = (y + base_len * bbox_pred[:, 3]). \\\n            clamp(min=0, max=max_shape[0] - 1)\n        decoded_bboxes = torch.stack([x1, y1, x2, y2], -1)\n        return decoded_bboxes\n\n    def _get_points_single(self, *args, **kwargs):\n        \"\"\"Get points according to feature map size.\n\n        This function will be deprecated soon.\n        \"\"\"\n        warnings.warn(\n            '`_get_points_single` in `FoveaHead` will be '\n            'deprecated soon, we support a multi level point generator now'\n            'you can get points of a single level feature map '\n            'with `self.prior_generator.single_level_grid_priors` ')\n        y, x = super()._get_points_single(*args, **kwargs)\n        return y + 0.5, x + 0.5\n"
  },
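# Illustrative sketch (not from the original file): FoveaHead above regresses
# log-space (l, t, r, b) offsets normalised by a per-level base edge length,
# and `_bbox_decode` undoes that with exp() plus clamping to the image.
# The prior position, prediction and base_len below are made-up toy values.
import torch

def fovea_decode(prior_xy, bbox_pred, base_len, max_shape):
    bbox_pred = bbox_pred.exp()  # predictions live in log space
    x, y = prior_xy[:, 0], prior_xy[:, 1]
    x1 = (x - base_len * bbox_pred[:, 0]).clamp(min=0, max=max_shape[1] - 1)
    y1 = (y - base_len * bbox_pred[:, 1]).clamp(min=0, max=max_shape[0] - 1)
    x2 = (x + base_len * bbox_pred[:, 2]).clamp(min=0, max=max_shape[1] - 1)
    y2 = (y + base_len * bbox_pred[:, 3]).clamp(min=0, max=max_shape[0] - 1)
    return torch.stack([x1, y1, x2, y2], dim=-1)

# one prior at (100, 80) on a 256x256 image, base_len = 64, zero log-offsets
print(fovea_decode(torch.tensor([[100., 80.]]), torch.zeros(1, 4), 64, (256, 256)))
# -> tensor([[ 36.,  16., 164., 144.]])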
  {
    "path": "mmdet/models/dense_heads/free_anchor_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core import bbox_overlaps\nfrom ..builder import HEADS\nfrom .retina_head import RetinaHead\n\nEPS = 1e-12\n\n\n@HEADS.register_module()\nclass FreeAnchorRetinaHead(RetinaHead):\n    \"\"\"FreeAnchor RetinaHead used in https://arxiv.org/abs/1909.02466.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Default: 4.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: norm_cfg=dict(type='GN', num_groups=32,\n            requires_grad=True).\n        pre_anchor_topk (int): Number of boxes that be token in each bag.\n        bbox_thr (float): The threshold of the saturated linear function. It is\n            usually the same with the IoU threshold used in NMS.\n        gamma (float): Gamma parameter in focal loss.\n        alpha (float): Alpha parameter in focal loss.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 pre_anchor_topk=50,\n                 bbox_thr=0.6,\n                 gamma=2.0,\n                 alpha=0.5,\n                 **kwargs):\n        super(FreeAnchorRetinaHead,\n              self).__init__(num_classes, in_channels, stacked_convs, conv_cfg,\n                             norm_cfg, **kwargs)\n\n        self.pre_anchor_topk = pre_anchor_topk\n        self.bbox_thr = bbox_thr\n        self.gamma = gamma\n        self.alpha = alpha\n\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n        device = cls_scores[0].device\n        anchor_list, _ = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        anchors = [torch.cat(anchor) for anchor in anchor_list]\n\n        # concatenate each level\n        cls_scores = [\n            cls.permute(0, 2, 3,\n                        1).reshape(cls.size(0), -1, 
self.cls_out_channels)\n            for cls in cls_scores\n        ]\n        bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(bbox_pred.size(0), -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        cls_scores = torch.cat(cls_scores, dim=1)\n        bbox_preds = torch.cat(bbox_preds, dim=1)\n\n        cls_prob = torch.sigmoid(cls_scores)\n        box_prob = []\n        num_pos = 0\n        positive_losses = []\n        for _, (anchors_, gt_labels_, gt_bboxes_, cls_prob_,\n                bbox_preds_) in enumerate(\n                    zip(anchors, gt_labels, gt_bboxes, cls_prob, bbox_preds)):\n\n            with torch.no_grad():\n                if len(gt_bboxes_) == 0:\n                    image_box_prob = torch.zeros(\n                        anchors_.size(0),\n                        self.cls_out_channels).type_as(bbox_preds_)\n                else:\n                    # box_localization: a_{j}^{loc}, shape: [j, 4]\n                    pred_boxes = self.bbox_coder.decode(anchors_, bbox_preds_)\n\n                    # object_box_iou: IoU_{ij}^{loc}, shape: [i, j]\n                    object_box_iou = bbox_overlaps(gt_bboxes_, pred_boxes)\n\n                    # object_box_prob: P{a_{j} -> b_{i}}, shape: [i, j]\n                    t1 = self.bbox_thr\n                    t2 = object_box_iou.max(\n                        dim=1, keepdim=True).values.clamp(min=t1 + 1e-12)\n                    object_box_prob = ((object_box_iou - t1) /\n                                       (t2 - t1)).clamp(\n                                           min=0, max=1)\n\n                    # object_cls_box_prob: P{a_{j} -> b_{i}}, shape: [i, c, j]\n                    num_obj = gt_labels_.size(0)\n                    indices = torch.stack([\n                        torch.arange(num_obj).type_as(gt_labels_), gt_labels_\n                    ],\n                                          dim=0)\n                    object_cls_box_prob = torch.sparse_coo_tensor(\n                        indices, object_box_prob)\n\n                    # image_box_iou: P{a_{j} \\in A_{+}}, shape: [c, j]\n                    \"\"\"\n                    from \"start\" to \"end\" implement:\n                    image_box_iou = torch.sparse.max(object_cls_box_prob,\n                                                     dim=0).t()\n\n                    \"\"\"\n                    # start\n                    box_cls_prob = torch.sparse.sum(\n                        object_cls_box_prob, dim=0).to_dense()\n\n                    indices = torch.nonzero(box_cls_prob, as_tuple=False).t_()\n                    if indices.numel() == 0:\n                        image_box_prob = torch.zeros(\n                            anchors_.size(0),\n                            self.cls_out_channels).type_as(object_box_prob)\n                    else:\n                        nonzero_box_prob = torch.where(\n                            (gt_labels_.unsqueeze(dim=-1) == indices[0]),\n                            object_box_prob[:, indices[1]],\n                            torch.tensor([\n                                0\n                            ]).type_as(object_box_prob)).max(dim=0).values\n\n                        # upmap to shape [j, c]\n                        image_box_prob = torch.sparse_coo_tensor(\n                            indices.flip([0]),\n                            nonzero_box_prob,\n                            size=(anchors_.size(0),\n                                  self.cls_out_channels)).to_dense()\n 
                   # end\n\n                box_prob.append(image_box_prob)\n\n            # construct bags for objects\n            match_quality_matrix = bbox_overlaps(gt_bboxes_, anchors_)\n            _, matched = torch.topk(\n                match_quality_matrix,\n                self.pre_anchor_topk,\n                dim=1,\n                sorted=False)\n            del match_quality_matrix\n\n            # matched_cls_prob: P_{ij}^{cls}\n            matched_cls_prob = torch.gather(\n                cls_prob_[matched], 2,\n                gt_labels_.view(-1, 1, 1).repeat(1, self.pre_anchor_topk,\n                                                 1)).squeeze(2)\n\n            # matched_box_prob: P_{ij}^{loc}\n            matched_anchors = anchors_[matched]\n            matched_object_targets = self.bbox_coder.encode(\n                matched_anchors,\n                gt_bboxes_.unsqueeze(dim=1).expand_as(matched_anchors))\n            loss_bbox = self.loss_bbox(\n                bbox_preds_[matched],\n                matched_object_targets,\n                reduction_override='none').sum(-1)\n            matched_box_prob = torch.exp(-loss_bbox)\n\n            # positive_losses: {-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )}\n            num_pos += len(gt_bboxes_)\n            positive_losses.append(\n                self.positive_bag_loss(matched_cls_prob, matched_box_prob))\n        positive_loss = torch.cat(positive_losses).sum() / max(1, num_pos)\n\n        # box_prob: P{a_{j} \\in A_{+}}\n        box_prob = torch.stack(box_prob, dim=0)\n\n        # negative_loss:\n        # \\sum_{j}{ FL((1 - P{a_{j} \\in A_{+}}) * (1 - P_{j}^{bg})) } / n||B||\n        negative_loss = self.negative_bag_loss(cls_prob, box_prob).sum() / max(\n            1, num_pos * self.pre_anchor_topk)\n\n        # avoid the absence of gradients in regression subnet\n        # when no ground-truth in a batch\n        if num_pos == 0:\n            positive_loss = bbox_preds.sum() * 0\n\n        losses = {\n            'positive_bag_loss': positive_loss,\n            'negative_bag_loss': negative_loss\n        }\n        return losses\n\n    def positive_bag_loss(self, matched_cls_prob, matched_box_prob):\n        \"\"\"Compute positive bag loss.\n\n        :math:`-log( Mean-max(P_{ij}^{cls} * P_{ij}^{loc}) )`.\n\n        :math:`P_{ij}^{cls}`: matched_cls_prob, classification probability of matched samples.\n\n        :math:`P_{ij}^{loc}`: matched_box_prob, box probability of matched samples.\n\n        Args:\n            matched_cls_prob (Tensor): Classification probability of matched\n                samples in shape (num_gt, pre_anchor_topk).\n            matched_box_prob (Tensor): BBox probability of matched samples,\n                in shape (num_gt, pre_anchor_topk).\n\n        Returns:\n            Tensor: Positive bag loss in shape (num_gt,).\n        \"\"\"  # noqa: E501, W605\n        # bag_prob = Mean-max(matched_prob)\n        matched_prob = matched_cls_prob * matched_box_prob\n        weight = 1 / torch.clamp(1 - matched_prob, 1e-12, None)\n        weight /= weight.sum(dim=1).unsqueeze(dim=-1)\n        bag_prob = (weight * matched_prob).sum(dim=1)\n        # positive_bag_loss = -self.alpha * log(bag_prob)\n        return self.alpha * F.binary_cross_entropy(\n            bag_prob, torch.ones_like(bag_prob), reduction='none')\n\n    def negative_bag_loss(self, cls_prob, box_prob):\n        \"\"\"Compute negative bag loss.\n\n        :math:`FL((1 - P_{a_{j} \\in A_{+}}) * (1 - P_{j}^{bg}))`.\n\n        
:math:`P_{a_{j} \\in A_{+}}`: Box probability of matched samples.\n\n        :math:`P_{j}^{bg}`: Classification probability of negative samples.\n\n        Args:\n            cls_prob (Tensor): Classification probability, in shape\n                (num_img, num_anchors, num_classes).\n            box_prob (Tensor): Box probability, in shape\n                (num_img, num_anchors, num_classes).\n\n        Returns:\n            Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).\n        \"\"\"  # noqa: E501, W605\n        prob = cls_prob * (1 - box_prob)\n        # There are some cases when neg_prob = 0.\n        # This will cause the neg_prob.log() to be inf without clamp.\n        prob = prob.clamp(min=EPS, max=1 - EPS)\n        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(\n            prob, torch.zeros_like(prob), reduction='none')\n        return (1 - self.alpha) * negative_bag_loss\n"
  },
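# Illustrative sketch (not from the original file): the "Mean-max" in
# `positive_bag_loss` above weights every anchor in a bag by 1 / (1 - p), so
# it behaves like a plain mean when all probabilities are low and approaches
# a hard max as they saturate; -alpha * log(bag_prob) is then computed via
# BCE against a target of 1. The bag probabilities below are made up.
import torch

matched_prob = torch.tensor([[0.9, 0.5, 0.1]])    # one gt, a bag of 3 anchors
weight = 1 / (1 - matched_prob).clamp(min=1e-12)
weight = weight / weight.sum(dim=1, keepdim=True)
bag_prob = (weight * matched_prob).sum(dim=1)     # ~0.77, dominated by the 0.9 anchor
loss = -0.5 * bag_prob.log()                      # alpha = 0.5, the head's default
print(bag_prob, loss)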
  {
    "path": "mmdet/models/dense_heads/fsaf_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,\n                        unmap)\nfrom ..builder import HEADS\nfrom ..losses.accuracy import accuracy\nfrom ..losses.utils import weight_reduce_loss\nfrom .retina_head import RetinaHead\n\n\n@HEADS.register_module()\nclass FSAFHead(RetinaHead):\n    \"\"\"Anchor-free head used in `FSAF <https://arxiv.org/abs/1903.00621>`_.\n\n    The head contains two subnetworks. The first classifies anchor boxes and\n    the second regresses deltas for the anchors (num_anchors is 1 for anchor-\n    free methods)\n\n    Args:\n        *args: Same as its base class in :class:`RetinaHead`\n        score_threshold (float, optional): The score_threshold to calculate\n            positive recall. If given, prediction scores lower than this value\n            is counted as incorrect prediction. Default to None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n        **kwargs: Same as its base class in :class:`RetinaHead`\n\n    Example:\n        >>> import torch\n        >>> self = FSAFHead(11, 7)\n        >>> x = torch.rand(1, 7, 32, 32)\n        >>> cls_score, bbox_pred = self.forward_single(x)\n        >>> # Each anchor predicts a score for each class except background\n        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors\n        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors\n        >>> assert cls_per_anchor == self.num_classes\n        >>> assert box_per_anchor == 4\n    \"\"\"\n\n    def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs):\n        # The positive bias in self.retina_reg conv is to prevent predicted \\\n        #  bbox with 0 area\n        if init_cfg is None:\n            init_cfg = dict(\n                type='Normal',\n                layer='Conv2d',\n                std=0.01,\n                override=[\n                    dict(\n                        type='Normal',\n                        name='retina_cls',\n                        std=0.01,\n                        bias_prob=0.01),\n                    dict(\n                        type='Normal', name='retina_reg', std=0.01, bias=0.25)\n                ])\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n        self.score_threshold = score_threshold\n\n    def forward_single(self, x):\n        \"\"\"Forward feature map of a single scale level.\n\n        Args:\n            x (Tensor): Feature map of a single scale level.\n\n        Returns:\n            tuple (Tensor):\n                cls_score (Tensor): Box scores for each scale level\n                    Has shape (N, num_points * num_classes, H, W).\n                bbox_pred (Tensor): Box energies / deltas for each scale\n                    level with shape (N, num_points * 4, H, W).\n        \"\"\"\n        cls_score, bbox_pred = super().forward_single(x)\n        # relu: TBLR encoder only accepts positive bbox_pred\n        return cls_score, self.relu(bbox_pred)\n\n    def _get_targets_single(self,\n                            flat_anchors,\n                            valid_flags,\n                            gt_bboxes,\n                            gt_bboxes_ignore,\n                            gt_labels,\n                            img_meta,\n                            label_channels=1,\n                            unmap_outputs=True):\n        
\"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Most of the codes are the same with the base class\n          :obj: `AnchorHead`, except that it also collects and returns\n          the matched gt index in the image (from 0 to num_gt-1). If the\n          anchor bbox is not matched to any gt, the corresponding value in\n          pos_gt_inds is -1.\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # Assign gt and sample anchors\n        anchors = flat_anchors[inside_flags.type(torch.bool), :]\n        assign_result = self.assigner.assign(\n            anchors, gt_bboxes, gt_bboxes_ignore,\n            None if self.sampling else gt_labels)\n\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros((num_valid_anchors, label_channels),\n                                          dtype=torch.float)\n        pos_gt_inds = anchors.new_full((num_valid_anchors, ),\n                                       -1,\n                                       dtype=torch.long)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        if len(pos_inds) > 0:\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n            else:\n                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n                # is applied directly on the decoded bounding boxes, both\n                # the predicted boxes and regression targets should be with\n                # absolute coordinate format.\n                pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            # The assigned gt_index for each anchor. 
(0-based)\n            pos_gt_inds[pos_inds] = sampling_result.pos_assigned_gt_inds\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # shadowed_labels is a tensor composed of tuples\n        #  (anchor_inds, class_label) that indicate those anchors lying in the\n        #  outer region of a gt or overlapped by another gt with a smaller\n        #  area.\n        #\n        # Therefore, only the shadowed labels are ignored for loss calculation.\n        # the key `shadowed_labels` is defined in :obj:`CenterRegionAssigner`\n        shadowed_labels = assign_result.get_extra_property('shadowed_labels')\n        if shadowed_labels is not None and shadowed_labels.numel():\n            if len(shadowed_labels.shape) == 2:\n                idx_, label_ = shadowed_labels[:, 0], shadowed_labels[:, 1]\n                assert (labels[idx_] != label_).all(), \\\n                    'One label cannot be both positive and ignored'\n                label_weights[idx_, label_] = 0\n            else:\n                label_weights[shadowed_labels] = 0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(labels, num_total_anchors, inside_flags)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n            pos_gt_inds = unmap(\n                pos_gt_inds, num_total_anchors, inside_flags, fill=-1)\n\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                neg_inds, sampling_result, pos_gt_inds)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_points * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_points * 4, H, W).\n            gt_bboxes (list[Tensor]): each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        for i in range(len(bbox_preds)):  # loop over fpn level\n            # avoid 0 area of the predicted 
bbox\n            bbox_preds[i] = bbox_preds[i].clamp(min=1e-4)\n        # TODO: It may directly use the base-class loss function.\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n        batch_size = len(gt_bboxes)\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg,\n         pos_assigned_gt_inds_list) = cls_reg_targets\n\n        num_gts = np.array(list(map(len, gt_labels)))\n        num_total_samples = (\n            num_total_pos + num_total_neg if self.sampling else num_total_pos)\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            num_total_samples=num_total_samples)\n\n        # `pos_assigned_gt_inds_list` (length: fpn_levels) stores the assigned\n        # gt index of each anchor bbox in each fpn level.\n        cum_num_gts = list(np.cumsum(num_gts))  # length of batch_size\n        for i, assign in enumerate(pos_assigned_gt_inds_list):\n            # loop over fpn levels\n            for j in range(1, batch_size):\n                # loop over batch size\n                # Convert gt indices in each img to those in the batch\n                assign[j][assign[j] >= 0] += int(cum_num_gts[j - 1])\n            pos_assigned_gt_inds_list[i] = assign.flatten()\n            labels_list[i] = labels_list[i].flatten()\n        num_gts = sum(map(len, gt_labels))  # total number of gt in the batch\n        # The unique label index of each gt in the batch\n        label_sequence = torch.arange(num_gts, device=device)\n        # Collect the average loss of each gt in each level\n        with torch.no_grad():\n            loss_levels, = multi_apply(\n                self.collect_loss_level_single,\n                losses_cls,\n                losses_bbox,\n                pos_assigned_gt_inds_list,\n                labels_seq=label_sequence)\n            # Shape: (fpn_levels, num_gts). 
Loss of each gt at each fpn level\n            loss_levels = torch.stack(loss_levels, dim=0)\n            # Locate the best fpn level for loss back-propagation\n            if loss_levels.numel() == 0:  # zero gt\n                argmin = loss_levels.new_empty((num_gts, ), dtype=torch.long)\n            else:\n                _, argmin = loss_levels.min(dim=0)\n\n        # Reweight the loss of each (anchor, label) pair, so that only those\n        #  at the best gt level are back-propagated.\n        losses_cls, losses_bbox, pos_inds = multi_apply(\n            self.reweight_loss_single,\n            losses_cls,\n            losses_bbox,\n            pos_assigned_gt_inds_list,\n            labels_list,\n            list(range(len(losses_cls))),\n            min_levels=argmin)\n        num_pos = torch.cat(pos_inds, 0).sum().float()\n        pos_recall = self.calculate_pos_recall(cls_scores, labels_list,\n                                               pos_inds)\n\n        if num_pos == 0:  # No gt\n            avg_factor = num_pos + float(num_total_neg)\n        else:\n            avg_factor = num_pos\n        for i in range(len(losses_cls)):\n            losses_cls[i] /= avg_factor\n            losses_bbox[i] /= avg_factor\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            num_pos=num_pos / batch_size,\n            pos_recall=pos_recall)\n\n    def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):\n        \"\"\"Calculate positive recall with score threshold.\n\n        Args:\n            cls_scores (list[Tensor]): Classification scores at all fpn levels.\n                Each tensor is in shape (N, num_classes * num_anchors, H, W)\n            labels_list (list[Tensor]): The label that each anchor is assigned\n                to. Shape (N * H * W * num_anchors, )\n            pos_inds (list[Tensor]): List of bool tensors indicating whether\n                the anchor is assigned to a positive label.\n                Shape (N * H * W * num_anchors, )\n\n        Returns:\n            Tensor: A single float number indicating the positive recall.\n        \"\"\"\n        with torch.no_grad():\n            num_class = self.num_classes\n            scores = [\n                cls.permute(0, 2, 3, 1).reshape(-1, num_class)[pos]\n                for cls, pos in zip(cls_scores, pos_inds)\n            ]\n            labels = [\n                label.reshape(-1)[pos]\n                for label, pos in zip(labels_list, pos_inds)\n            ]\n            scores = torch.cat(scores, dim=0)\n            labels = torch.cat(labels, dim=0)\n            if self.use_sigmoid_cls:\n                scores = scores.sigmoid()\n            else:\n                scores = scores.softmax(dim=1)\n\n            return accuracy(scores, labels, thresh=self.score_threshold)\n\n    def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,\n                                  labels_seq):\n        \"\"\"Get the average loss in each FPN level w.r.t. each gt label.\n\n        Args:\n            cls_loss (Tensor): Classification loss of each feature map pixel,\n              shape (num_anchor, num_class)\n            reg_loss (Tensor): Regression loss of each feature map pixel,\n              shape (num_anchor, 4)\n            assigned_gt_inds (Tensor): It indicates which gt the prior is\n              assigned to (0-based, -1: no assignment). shape (num_anchor),\n            labels_seq: The rank of labels. 
shape (num_gt)\n\n        Returns:\n            shape: (num_gt), average loss of each gt in this level\n        \"\"\"\n        if len(reg_loss.shape) == 2:  # iou loss has shape (num_prior, 4)\n            reg_loss = reg_loss.sum(dim=-1)  # sum loss in tblr dims\n        if len(cls_loss.shape) == 2:\n            cls_loss = cls_loss.sum(dim=-1)  # sum loss in class dims\n        loss = cls_loss + reg_loss\n        assert loss.size(0) == assigned_gt_inds.size(0)\n        # Default loss value is 1e6 for a layer where no anchor is positive\n        #  to ensure it will not be chosen to back-propagate gradient\n        losses_ = loss.new_full(labels_seq.shape, 1e6)\n        for i, l in enumerate(labels_seq):\n            match = assigned_gt_inds == l\n            if match.any():\n                losses_[i] = loss[match].mean()\n        return losses_,\n\n    def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,\n                             labels, level, min_levels):\n        \"\"\"Reweight loss values at each level.\n\n        Reassign loss values at each level by masking those where the\n        pre-calculated loss is too large. Then return the reduced losses.\n\n        Args:\n            cls_loss (Tensor): Element-wise classification loss.\n              Shape: (num_anchors, num_classes)\n            reg_loss (Tensor): Element-wise regression loss.\n              Shape: (num_anchors, 4)\n            assigned_gt_inds (Tensor): The gt indices that each anchor bbox\n              is assigned to. -1 denotes a negative anchor, otherwise it is the\n              gt index (0-based). Shape: (num_anchors, ),\n            labels (Tensor): Label assigned to anchors. Shape: (num_anchors, ).\n            level (int): The current level index in the pyramid\n              (0-4 for RetinaNet)\n            min_levels (Tensor): The best-matching level for each gt.\n              Shape: (num_gts, ),\n\n        Returns:\n            tuple:\n                - cls_loss: Reduced corrected classification loss. Scalar.\n                - reg_loss: Reduced corrected regression loss. Scalar.\n                - pos_flags (Tensor): Corrected bool tensor indicating the\n                  final positive anchors. Shape: (num_anchors, ).\n        \"\"\"\n        loc_weight = torch.ones_like(reg_loss)\n        cls_weight = torch.ones_like(cls_loss)\n        pos_flags = assigned_gt_inds >= 0  # positive pixel flag\n        pos_indices = torch.nonzero(pos_flags, as_tuple=False).flatten()\n\n        if pos_flags.any():  # pos pixels exist\n            pos_assigned_gt_inds = assigned_gt_inds[pos_flags]\n            zeroing_indices = (min_levels[pos_assigned_gt_inds] != level)\n            neg_indices = pos_indices[zeroing_indices]\n\n            if neg_indices.numel():\n                pos_flags[neg_indices] = 0\n                loc_weight[neg_indices] = 0\n                # Only the weight corresponding to the label is\n                #  zeroed out if not selected\n                zeroing_labels = labels[neg_indices]\n                assert (zeroing_labels >= 0).all()\n                cls_weight[neg_indices, zeroing_labels] = 0\n\n        # Weighted loss for both cls and reg loss\n        cls_loss = weight_reduce_loss(cls_loss, cls_weight, reduction='sum')\n        reg_loss = weight_reduce_loss(reg_loss, loc_weight, reduction='sum')\n\n        return cls_loss, reg_loss, pos_flags\n"
  },
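# Illustrative sketch (not from the original file): FSAF's online feature
# selection in `loss` above stacks the per-level average loss of every gt
# (1e6 where a level has no positive anchor) and back-propagates each gt only
# through its argmin level. The loss values below are made up.
import torch

# rows: 3 FPN levels, cols: 2 gts
loss_levels = torch.tensor([[1e6, 0.8],
                            [0.4, 0.9],
                            [0.7, 1e6]])
_, argmin = loss_levels.min(dim=0)
print(argmin)  # tensor([1, 0]) -> gt 0 trains level 1, gt 1 trains level 0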
  {
    "path": "mmdet/models/dense_heads/ga_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import MaskedConv2d\n\nfrom ..builder import HEADS\nfrom .guided_anchor_head import FeatureAdaption, GuidedAnchorHead\n\n\n@HEADS.register_module()\nclass GARetinaHead(GuidedAnchorHead):\n    \"\"\"Guided-Anchor-based RetinaNet head.\"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        if init_cfg is None:\n            init_cfg = dict(\n                type='Normal',\n                layer='Conv2d',\n                std=0.01,\n                override=[\n                    dict(\n                        type='Normal',\n                        name='conv_loc',\n                        std=0.01,\n                        bias_prob=0.01),\n                    dict(\n                        type='Normal',\n                        name='retina_cls',\n                        std=0.01,\n                        bias_prob=0.01)\n                ])\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super(GARetinaHead, self).__init__(\n            num_classes, in_channels, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_loc = nn.Conv2d(self.feat_channels, 1, 1)\n        self.conv_shape = nn.Conv2d(self.feat_channels, self.num_anchors * 2,\n                                    1)\n        self.feature_adaption_cls = FeatureAdaption(\n            self.feat_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.feature_adaption_reg = FeatureAdaption(\n            self.feat_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.retina_cls = MaskedConv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.retina_reg = MaskedConv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature map of a single scale level.\"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n\n        
loc_pred = self.conv_loc(cls_feat)\n        shape_pred = self.conv_shape(reg_feat)\n\n        cls_feat = self.feature_adaption_cls(cls_feat, shape_pred)\n        reg_feat = self.feature_adaption_reg(reg_feat, shape_pred)\n\n        if not self.training:\n            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr\n        else:\n            mask = None\n        cls_score = self.retina_cls(cls_feat, mask)\n        bbox_pred = self.retina_reg(reg_feat, mask)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n"
  },
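# Illustrative sketch (not from the original file): at test time
# `forward_single` above thresholds the sigmoid of the location branch to get
# a boolean mask, and mmcv's MaskedConv2d then evaluates cls/reg only at the
# kept positions. The random tensor below is toy data, and 0.01 is a
# stand-in for `self.loc_filter_thr`.
import torch

loc_pred = torch.randn(1, 1, 4, 4)        # (N, 1, H, W) location logits
mask = loc_pred.sigmoid()[0] >= 0.01      # (1, H, W) keep-mask
print(mask.shape, mask.float().mean())    # fraction of positions kept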
  {
    "path": "mmdet/models/dense_heads/ga_rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv import ConfigDict\nfrom mmcv.ops import nms\n\nfrom ..builder import HEADS\nfrom .guided_anchor_head import GuidedAnchorHead\n\n\n@HEADS.register_module()\nclass GARPNHead(GuidedAnchorHead):\n    \"\"\"Guided-Anchor-based RPN head.\"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='conv_loc',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        super(GARPNHead, self).__init__(\n            1, in_channels, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.rpn_conv = nn.Conv2d(\n            self.in_channels, self.feat_channels, 3, padding=1)\n        super(GARPNHead, self)._init_layers()\n\n    def forward_single(self, x):\n        \"\"\"Forward feature of a single scale level.\"\"\"\n\n        x = self.rpn_conv(x)\n        x = F.relu(x, inplace=True)\n        (cls_score, bbox_pred, shape_pred,\n         loc_pred) = super(GARPNHead, self).forward_single(x)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             shape_preds,\n             loc_preds,\n             gt_bboxes,\n             img_metas,\n             gt_bboxes_ignore=None):\n        losses = super(GARPNHead, self).loss(\n            cls_scores,\n            bbox_preds,\n            shape_preds,\n            loc_preds,\n            gt_bboxes,\n            None,\n            img_metas,\n            gt_bboxes_ignore=gt_bboxes_ignore)\n        return dict(\n            loss_rpn_cls=losses['loss_cls'],\n            loss_rpn_bbox=losses['loss_bbox'],\n            loss_anchor_shape=losses['loss_shape'],\n            loss_anchor_loc=losses['loss_loc'])\n\n    def _get_bboxes_single(self,\n                           cls_scores,\n                           bbox_preds,\n                           mlvl_anchors,\n                           mlvl_masks,\n                           img_shape,\n                           scale_factor,\n                           cfg,\n                           rescale=False):\n        cfg = self.test_cfg if cfg is None else cfg\n\n        cfg = copy.deepcopy(cfg)\n\n        # deprecate arguments warning\n        if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg:\n            warnings.warn(\n                'In rpn_proposal or test_cfg, '\n                'nms_thr has been moved to a dict named nms as '\n                'iou_threshold, max_num has been renamed as max_per_img, '\n                'name of original arguments and the way to specify '\n                'iou_threshold of NMS will be deprecated.')\n        if 'nms' not in cfg:\n            cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr))\n        if 'max_num' in cfg:\n            if 'max_per_img' in cfg:\n                assert cfg.max_num == cfg.max_per_img, f'You ' \\\n                    f'set max_num and max_per_img at the same time, ' \\\n                    f'but get {cfg.max_num} ' \\\n                    f'and {cfg.max_per_img} respectively' \\\n                  
  'Please delete max_num which will be deprecated.'\n            else:\n                cfg.max_per_img = cfg.max_num\n        if 'nms_thr' in cfg:\n            assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \\\n                f'iou_threshold in nms and ' \\\n                f'nms_thr at the same time, but get ' \\\n                f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \\\n                f' respectively. Please delete the ' \\\n                f'nms_thr which will be deprecated.'\n\n        assert cfg.nms.get('type', 'nms') == 'nms', 'GARPNHead only support ' \\\n            'naive nms.'\n\n        mlvl_proposals = []\n        for idx in range(len(cls_scores)):\n            rpn_cls_score = cls_scores[idx]\n            rpn_bbox_pred = bbox_preds[idx]\n            anchors = mlvl_anchors[idx]\n            mask = mlvl_masks[idx]\n            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]\n            # if no location is kept, end.\n            if mask.sum() == 0:\n                continue\n            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)\n            if self.use_sigmoid_cls:\n                rpn_cls_score = rpn_cls_score.reshape(-1)\n                scores = rpn_cls_score.sigmoid()\n            else:\n                rpn_cls_score = rpn_cls_score.reshape(-1, 2)\n                # remind that we set FG labels to [0, num_class-1]\n                # since mmdet v2.0\n                # BG cat_id: num_class\n                scores = rpn_cls_score.softmax(dim=1)[:, :-1]\n            # filter scores, bbox_pred w.r.t. mask.\n            # anchors are filtered in get_anchors() beforehand.\n            scores = scores[mask]\n            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1,\n                                                                   4)[mask, :]\n            if scores.dim() == 0:\n                rpn_bbox_pred = rpn_bbox_pred.unsqueeze(0)\n                anchors = anchors.unsqueeze(0)\n                scores = scores.unsqueeze(0)\n            # filter anchors, bbox_pred, scores w.r.t. scores\n            if cfg.nms_pre > 0 and scores.shape[0] > cfg.nms_pre:\n                _, topk_inds = scores.topk(cfg.nms_pre)\n                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]\n                anchors = anchors[topk_inds, :]\n                scores = scores[topk_inds]\n            # get proposals w.r.t. 
anchors and rpn_bbox_pred\n            proposals = self.bbox_coder.decode(\n                anchors, rpn_bbox_pred, max_shape=img_shape)\n            # filter out too small bboxes\n            if cfg.min_bbox_size >= 0:\n                w = proposals[:, 2] - proposals[:, 0]\n                h = proposals[:, 3] - proposals[:, 1]\n                valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n                if not valid_mask.all():\n                    proposals = proposals[valid_mask]\n                    scores = scores[valid_mask]\n\n            # NMS in current level\n            proposals, _ = nms(proposals, scores, cfg.nms.iou_threshold)\n            proposals = proposals[:cfg.nms_post, :]\n            mlvl_proposals.append(proposals)\n        proposals = torch.cat(mlvl_proposals, 0)\n        if cfg.get('nms_across_levels', False):\n            # NMS across multi levels\n            proposals, _ = nms(proposals[:, :4], proposals[:, -1],\n                               cfg.nms.iou_threshold)\n            proposals = proposals[:cfg.max_per_img, :]\n        else:\n            scores = proposals[:, 4]\n            num = min(cfg.max_per_img, proposals.shape[0])\n            _, topk_inds = scores.topk(num)\n            proposals = proposals[topk_inds, :]\n        return proposals\n"
  },
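# Illustrative sketch (not from the original file): after per-level NMS,
# `_get_bboxes_single` above either runs NMS across levels or (by default)
# simply keeps the `max_per_img` highest-scoring proposals, as in this toy
# example with (x1, y1, x2, y2, score) rows.
import torch

proposals = torch.tensor([[0., 0., 10., 10., 0.9],
                          [5., 5., 20., 20., 0.3],
                          [2., 2., 12., 12., 0.7]])
max_per_img = 2
_, topk_inds = proposals[:, 4].topk(min(max_per_img, proposals.shape[0]))
print(proposals[topk_inds, :])   # the 0.9 and 0.7 proposals survive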
  {
    "path": "mmdet/models/dense_heads/gfl_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, bbox_overlaps, build_assigner,\n                        build_sampler, images_to_levels, multi_apply,\n                        reduce_mean, unmap)\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom ..builder import HEADS, build_loss\nfrom .anchor_head import AnchorHead\n\n\nclass Integral(nn.Module):\n    \"\"\"A fixed layer for calculating integral result from distribution.\n\n    This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,\n    P(y_i) denotes the softmax vector that represents the discrete distribution\n    y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}\n\n    Args:\n        reg_max (int): The maximal value of the discrete set. Default: 16. You\n            may want to reset it according to your new dataset or related\n            settings.\n    \"\"\"\n\n    def __init__(self, reg_max=16):\n        super(Integral, self).__init__()\n        self.reg_max = reg_max\n        self.register_buffer('project',\n                             torch.linspace(0, self.reg_max, self.reg_max + 1))\n\n    def forward(self, x):\n        \"\"\"Forward feature from the regression head to get integral result of\n        bounding box location.\n\n        Args:\n            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),\n                n is self.reg_max.\n\n        Returns:\n            x (Tensor): Integral result of box locations, i.e., distance\n                offsets from the box center in four directions, shape (N, 4).\n        \"\"\"\n        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)\n        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)\n        return x\n\n\n@HEADS.register_module()\nclass GFLHead(AnchorHead):\n    \"\"\"Generalized Focal Loss: Learning Qualified and Distributed Bounding\n    Boxes for Dense Object Detection.\n\n    GFL head structure is similar with ATSS, however GFL uses\n    1) joint representation for classification and localization quality, and\n    2) flexible General distribution for bounding box locations,\n    which are supervised by\n    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively\n\n    https://arxiv.org/abs/2006.04388\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Default: 4.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='GN', num_groups=32, requires_grad=True).\n        loss_qfl (dict): Config of Quality Focal Loss (QFL).\n        bbox_coder (dict): Config of bbox coder. Defaults\n            'DistancePointBBoxCoder'.\n        reg_max (int): Max value of integral set :math: `{0, ..., reg_max}`\n            in QFL setting. 
Default: 16.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    Example:\n        >>> self = GFLHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_quality_score, bbox_pred = self.forward(feats)\n        >>> assert len(cls_quality_score) == len(self.scales)\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 loss_dfl=dict(type='DistributionFocalLoss', loss_weight=0.25),\n                 bbox_coder=dict(type='DistancePointBBoxCoder'),\n                 reg_max=16,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='gfl_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.reg_max = reg_max\n        super(GFLHead, self).__init__(\n            num_classes,\n            in_channels,\n            bbox_coder=bbox_coder,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        self.sampling = False\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # SSD sampling=False so use PseudoSampler\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n\n        self.integral = Integral(self.reg_max)\n        self.loss_dfl = build_loss(loss_dfl)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        assert self.num_anchors == 1, 'anchor free version'\n        self.gfl_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.gfl_reg = nn.Conv2d(\n            self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): 
Classification and quality (IoU)\n                    joint scores for all scale levels, each is a 4D-tensor,\n                    the channel number is num_classes.\n                bbox_preds (list[Tensor]): Box distribution logits for all\n                    scale levels, each is a 4D-tensor, the channel number is\n                    4*(n+1), n is max value of integral set.\n        \"\"\"\n        return multi_apply(self.forward_single, feats, self.scales)\n\n    def forward_single(self, x, scale):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls and quality joint scores for a single\n                    scale level the channel number is num_classes.\n                bbox_pred (Tensor): Box distribution logits for a single scale\n                    level, the channel number is 4*(n+1), n is max value of\n                    integral set.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.gfl_cls(cls_feat)\n        bbox_pred = scale(self.gfl_reg(reg_feat)).float()\n        return cls_score, bbox_pred\n\n    def anchor_center(self, anchors):\n        \"\"\"Get anchor centers from anchors.\n\n        Args:\n            anchors (Tensor): Anchor list with shape (N, 4), \"xyxy\" format.\n\n        Returns:\n            Tensor: Anchor centers with shape (N, 2), \"xy\" format.\n        \"\"\"\n        anchors_cx = (anchors[..., 2] + anchors[..., 0]) / 2\n        anchors_cy = (anchors[..., 3] + anchors[..., 1]) / 2\n        return torch.stack([anchors_cx, anchors_cy], dim=-1)\n\n    def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,\n                    bbox_targets, stride, num_total_samples):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Cls and quality joint scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_pred (Tensor): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            stride (tuple): Stride in this scale level.\n            num_total_samples (int): Number of positive samples that is\n                reduced over all GPUs.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        bbox_pred = bbox_pred.permute(0, 2, 3,\n         
                             1).reshape(-1, 4 * (self.reg_max + 1))\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n        score = label_weights.new_zeros(labels.shape)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]\n\n            weight_targets = cls_score.detach().sigmoid()\n            weight_targets = weight_targets.max(dim=1)[0][pos_inds]\n            pos_bbox_pred_corners = self.integral(pos_bbox_pred)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchor_centers, pos_bbox_pred_corners)\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n            score[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\n            target_corners = self.bbox_coder.encode(pos_anchor_centers,\n                                                    pos_decode_bbox_targets,\n                                                    self.reg_max).reshape(-1)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=weight_targets,\n                avg_factor=1.0)\n\n            # dfl loss\n            loss_dfl = self.loss_dfl(\n                pred_corners,\n                target_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            loss_dfl = bbox_pred.sum() * 0\n            weight_targets = bbox_pred.new_tensor(0)\n\n        # cls (qfl) loss\n        loss_cls = self.loss_cls(\n            cls_score, (labels, score),\n            weight=label_weights,\n            avg_factor=num_total_samples)\n\n        return loss_cls, loss_bbox, loss_dfl, weight_targets.sum()\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): specify which bounding\n                boxes can be 
ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n\n        num_total_samples = reduce_mean(\n            torch.tensor(num_total_pos, dtype=torch.float,\n                         device=device)).item()\n        num_total_samples = max(num_total_samples, 1.0)\n\n        losses_cls, losses_bbox, losses_dfl,\\\n            avg_factor = multi_apply(\n                self.loss_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                self.prior_generator.strides,\n                num_total_samples=num_total_samples)\n\n        avg_factor = sum(avg_factor)\n        avg_factor = reduce_mean(avg_factor).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))\n        losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_dfl=losses_dfl)\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. 
GFL head does not need this value.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, stride, priors) in enumerate(\n                zip(cls_score_list, bbox_pred_list,\n                    self.prior_generator.strides, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            assert stride[0] == stride[1]\n\n            bbox_pred = bbox_pred.permute(1, 2, 0)\n            bbox_pred = self.integral(bbox_pred) * stride[0]\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self.bbox_coder.decode(\n                self.anchor_center(priors), bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        return self._bbox_post_process(\n            mlvl_scores,\n            mlvl_labels,\n            mlvl_bboxes,\n            img_meta['scale_factor'],\n            cfg,\n            rescale=rescale,\n            with_nms=with_nms)\n\n    def get_targets(self,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True):\n        \"\"\"Get targets for GFL head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. Besides\n        returning the targets as the parent method does, it also returns the\n        anchors as the first element of the returned tuple.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n         all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single,\n             anchor_list,\n             valid_flag_list,\n             num_level_anchors_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             label_channels=label_channels,\n             unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_anchors)\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, bbox_weights_list, num_total_pos,\n                num_total_neg)\n\n    def _get_target_single(self,\n                           flat_anchors,\n                           valid_flags,\n                           num_level_anchors,\n                           gt_bboxes,\n                           gt_bboxes_ignore,\n                           gt_labels,\n                           img_meta,\n                           label_channels=1,\n                           unmap_outputs=True):\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors, 4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            num_level_anchors (Tensor): Number of anchors of each scale level.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            img_meta (dict): Meta info of the image.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                anchors (Tensor): All anchors in the image with shape (N, 4).\n                labels (Tensor): Labels of all anchors in the image with shape\n                    (N,).\n                label_weights (Tensor): Label weights of all anchors in the\n                    image with shape (N,).\n                bbox_targets (Tensor): BBox targets of all anchors in the\n                    image with shape (N, 4).\n                bbox_weights (Tensor): BBox weights of all anchors in the\n                    image with shape (N, 4).\n                pos_inds (Tensor): Indices of positive anchors with shape\n                    (num_pos,).\n                neg_inds (Tensor): Indices of negative anchors with shape\n                    (num_neg,).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n\n        num_level_anchors_inside = self.get_num_level_anchors_inside(\n            
num_level_anchors, inside_flags)\n        assign_result = self.assigner.assign(anchors, num_level_anchors_inside,\n                                             gt_bboxes, gt_bboxes_ignore,\n                                             gt_labels)\n\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        bbox_weights = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (anchors, labels, label_weights, bbox_targets, bbox_weights,\n                pos_inds, neg_inds)\n\n    def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):\n        split_inside_flags = torch.split(inside_flags, num_level_anchors)\n        num_level_anchors_inside = [\n            int(flags.sum()) for flags in split_inside_flags\n        ]\n        return num_level_anchors_inside\n"
  },
  {
    "path": "mmdet/models/dense_heads/guided_anchor_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import DeformConv2d, MaskedConv2d\nfrom mmcv.runner import BaseModule, force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, build_assigner, build_bbox_coder,\n                        build_prior_generator, build_sampler, calc_region,\n                        images_to_levels, multi_apply, multiclass_nms, unmap)\nfrom ..builder import HEADS, build_loss\nfrom .anchor_head import AnchorHead\n\n\nclass FeatureAdaption(BaseModule):\n    \"\"\"Feature Adaption Module.\n\n    Feature Adaption Module is implemented based on DCN v1.\n    It uses anchor shape prediction rather than feature map to\n    predict offsets of deform conv layer.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        out_channels (int): Number of channels in the output feature map.\n        kernel_size (int): Deformable conv kernel size.\n        deform_groups (int): Deformable conv group size.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=3,\n                 deform_groups=4,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.1,\n                     override=dict(\n                         type='Normal', name='conv_adaption', std=0.01))):\n        super(FeatureAdaption, self).__init__(init_cfg)\n        offset_channels = kernel_size * kernel_size * 2\n        self.conv_offset = nn.Conv2d(\n            2, deform_groups * offset_channels, 1, bias=False)\n        self.conv_adaption = DeformConv2d(\n            in_channels,\n            out_channels,\n            kernel_size=kernel_size,\n            padding=(kernel_size - 1) // 2,\n            deform_groups=deform_groups)\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x, shape):\n        offset = self.conv_offset(shape.detach())\n        x = self.relu(self.conv_adaption(x, offset))\n        return x\n\n\n@HEADS.register_module()\nclass GuidedAnchorHead(AnchorHead):\n    \"\"\"Guided-Anchor-based head (GA-RPN, GA-RetinaNet, etc.).\n\n    This GuidedAnchorHead will predict high-quality feature guided\n    anchors and locations where anchors will be kept in inference.\n    There are mainly 3 categories of bounding-boxes.\n\n    - Sampled 9 pairs for target assignment. (approxes)\n    - The square boxes where the predicted anchors are based on. (squares)\n    - Guided anchors.\n\n    Please refer to https://arxiv.org/abs/1901.03278 for more details.\n\n    Args:\n        num_classes (int): Number of classes.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels.\n        approx_anchor_generator (dict): Config dict for approx generator\n        square_anchor_generator (dict): Config dict for square generator\n        anchor_coder (dict): Config dict for anchor coder\n        bbox_coder (dict): Config dict for bbox coder\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. 
It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        deform_groups: (int): Group number of DCN in\n            FeatureAdaption module.\n        loc_filter_thr (float): Threshold to filter out unconcerned regions.\n        loss_loc (dict): Config of location loss.\n        loss_shape (dict): Config of anchor shape loss.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of bbox regression loss.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n            self,\n            num_classes,\n            in_channels,\n            feat_channels=256,\n            approx_anchor_generator=dict(\n                type='AnchorGenerator',\n                octave_base_scale=8,\n                scales_per_octave=3,\n                ratios=[0.5, 1.0, 2.0],\n                strides=[4, 8, 16, 32, 64]),\n            square_anchor_generator=dict(\n                type='AnchorGenerator',\n                ratios=[1.0],\n                scales=[8],\n                strides=[4, 8, 16, 32, 64]),\n            anchor_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[.0, .0, .0, .0],\n                target_stds=[1.0, 1.0, 1.0, 1.0]\n            ),\n            bbox_coder=dict(\n                type='DeltaXYWHBBoxCoder',\n                target_means=[.0, .0, .0, .0],\n                target_stds=[1.0, 1.0, 1.0, 1.0]\n            ),\n            reg_decoded_bbox=False,\n            deform_groups=4,\n            loc_filter_thr=0.01,\n            train_cfg=None,\n            test_cfg=None,\n            loss_loc=dict(\n                type='FocalLoss',\n                use_sigmoid=True,\n                gamma=2.0,\n                alpha=0.25,\n                loss_weight=1.0),\n            loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),\n            loss_cls=dict(\n                type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),\n            loss_bbox=dict(type='SmoothL1Loss', beta=1.0,\n                           loss_weight=1.0),\n            init_cfg=dict(type='Normal', layer='Conv2d', std=0.01,\n                          override=dict(type='Normal',\n                                        name='conv_loc',\n                                        std=0.01,\n                                        bias_prob=0.01))):  # yapf: disable\n        super(AnchorHead, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.deform_groups = deform_groups\n        self.loc_filter_thr = loc_filter_thr\n\n        # build approx_anchor_generator and square_anchor_generator\n        assert (approx_anchor_generator['octave_base_scale'] ==\n                square_anchor_generator['scales'][0])\n        assert (approx_anchor_generator['strides'] ==\n                square_anchor_generator['strides'])\n        self.approx_anchor_generator = build_prior_generator(\n            approx_anchor_generator)\n        self.square_anchor_generator = build_prior_generator(\n            square_anchor_generator)\n        self.approxs_per_octave = self.approx_anchor_generator \\\n            .num_base_priors[0]\n\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        # one anchor per location\n        self.num_base_priors = self.square_anchor_generator.num_base_priors[0]\n\n        self.use_sigmoid_cls = 
loss_cls.get('use_sigmoid', False)\n        self.loc_focal_loss = loss_loc['type'] in ['FocalLoss']\n        self.sampling = loss_cls['type'] not in ['FocalLoss']\n        self.ga_sampling = train_cfg is not None and hasattr(\n            train_cfg, 'ga_sampler')\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = self.num_classes\n        else:\n            self.cls_out_channels = self.num_classes + 1\n\n        # build bbox_coder\n        self.anchor_coder = build_bbox_coder(anchor_coder)\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n\n        # build losses\n        self.loss_loc = build_loss(loss_loc)\n        self.loss_shape = build_loss(loss_shape)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # use PseudoSampler when sampling is False\n            if self.sampling and hasattr(self.train_cfg, 'sampler'):\n                sampler_cfg = self.train_cfg.sampler\n            else:\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n\n            self.ga_assigner = build_assigner(self.train_cfg.ga_assigner)\n            if self.ga_sampling:\n                ga_sampler_cfg = self.train_cfg.ga_sampler\n            else:\n                ga_sampler_cfg = dict(type='PseudoSampler')\n            self.ga_sampler = build_sampler(ga_sampler_cfg, context=self)\n\n        self.fp16_enabled = False\n\n        self._init_layers()\n\n    @property\n    def num_anchors(self):\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'please use \"num_base_priors\" instead')\n        return self.square_anchor_generator.num_base_priors[0]\n\n    def _init_layers(self):\n        self.relu = nn.ReLU(inplace=True)\n        self.conv_loc = nn.Conv2d(self.in_channels, 1, 1)\n        self.conv_shape = nn.Conv2d(self.in_channels, self.num_base_priors * 2,\n                                    1)\n        self.feature_adaption = FeatureAdaption(\n            self.in_channels,\n            self.feat_channels,\n            kernel_size=3,\n            deform_groups=self.deform_groups)\n        self.conv_cls = MaskedConv2d(\n            self.feat_channels, self.num_base_priors * self.cls_out_channels,\n            1)\n        self.conv_reg = MaskedConv2d(self.feat_channels,\n                                     self.num_base_priors * 4, 1)\n\n    def forward_single(self, x):\n        loc_pred = self.conv_loc(x)\n        shape_pred = self.conv_shape(x)\n        x = self.feature_adaption(x, shape_pred)\n        # masked conv is only used during inference for speed-up\n        if not self.training:\n            mask = loc_pred.sigmoid()[0] >= self.loc_filter_thr\n        else:\n            mask = None\n        cls_score = self.conv_cls(x, mask)\n        bbox_pred = self.conv_reg(x, mask)\n        return cls_score, bbox_pred, shape_pred, loc_pred\n\n    def forward(self, feats):\n        return multi_apply(self.forward_single, feats)\n\n    def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):\n        \"\"\"Get sampled approxs and inside flags according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            
device (torch.device | str): device for returned tensors\n\n        Returns:\n            tuple: approxes of each image, inside flags of each image\n        \"\"\"\n        num_imgs = len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # approxes for one time\n        multi_level_approxs = self.approx_anchor_generator.grid_priors(\n            featmap_sizes, device=device)\n        approxs_list = [multi_level_approxs for _ in range(num_imgs)]\n\n        # for each image, we compute inside flags of multi level approxes\n        inside_flag_list = []\n        for img_id, img_meta in enumerate(img_metas):\n            multi_level_flags = []\n            multi_level_approxs = approxs_list[img_id]\n\n            # obtain valid flags for each approx first\n            multi_level_approx_flags = self.approx_anchor_generator \\\n                .valid_flags(featmap_sizes,\n                             img_meta['pad_shape'],\n                             device=device)\n\n            for i, flags in enumerate(multi_level_approx_flags):\n                approxs = multi_level_approxs[i]\n                inside_flags_list = []\n                for i in range(self.approxs_per_octave):\n                    split_valid_flags = flags[i::self.approxs_per_octave]\n                    split_approxs = approxs[i::self.approxs_per_octave, :]\n                    inside_flags = anchor_inside_flags(\n                        split_approxs, split_valid_flags,\n                        img_meta['img_shape'][:2],\n                        self.train_cfg.allowed_border)\n                    inside_flags_list.append(inside_flags)\n                # inside_flag for a position is true if any anchor in this\n                # position is true\n                inside_flags = (\n                    torch.stack(inside_flags_list, 0).sum(dim=0) > 0)\n                multi_level_flags.append(inside_flags)\n            inside_flag_list.append(multi_level_flags)\n        return approxs_list, inside_flag_list\n\n    def get_anchors(self,\n                    featmap_sizes,\n                    shape_preds,\n                    loc_preds,\n                    img_metas,\n                    use_loc_filter=False,\n                    device='cuda'):\n        \"\"\"Get squares according to feature map sizes and guided anchors.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            shape_preds (list[tensor]): Multi-level shape predictions.\n            loc_preds (list[tensor]): Multi-level location predictions.\n            img_metas (list[dict]): Image meta info.\n            use_loc_filter (bool): Use loc filter or not.\n            device (torch.device | str): device for returned tensors\n\n        Returns:\n            tuple: square approxs of each image, guided anchors of each image,\n                loc masks of each image\n        \"\"\"\n        num_imgs = len(img_metas)\n        num_levels = len(featmap_sizes)\n\n        # since feature map sizes of all images are the same, we only compute\n        # squares for one time\n        multi_level_squares = self.square_anchor_generator.grid_priors(\n            featmap_sizes, device=device)\n        squares_list = [multi_level_squares for _ in range(num_imgs)]\n\n        # for each image, we compute multi level guided anchors\n        guided_anchors_list = []\n        loc_mask_list = []\n        for img_id, img_meta in enumerate(img_metas):\n            multi_level_guided_anchors = 
[]\n            multi_level_loc_mask = []\n            for i in range(num_levels):\n                squares = squares_list[img_id][i]\n                shape_pred = shape_preds[i][img_id]\n                loc_pred = loc_preds[i][img_id]\n                guided_anchors, loc_mask = self._get_guided_anchors_single(\n                    squares,\n                    shape_pred,\n                    loc_pred,\n                    use_loc_filter=use_loc_filter)\n                multi_level_guided_anchors.append(guided_anchors)\n                multi_level_loc_mask.append(loc_mask)\n            guided_anchors_list.append(multi_level_guided_anchors)\n            loc_mask_list.append(multi_level_loc_mask)\n        return squares_list, guided_anchors_list, loc_mask_list\n\n    def _get_guided_anchors_single(self,\n                                   squares,\n                                   shape_pred,\n                                   loc_pred,\n                                   use_loc_filter=False):\n        \"\"\"Get guided anchors and loc masks for a single level.\n\n        Args:\n            square (tensor): Squares of a single level.\n            shape_pred (tensor): Shape predictions of a single level.\n            loc_pred (tensor): Loc predictions of a single level.\n            use_loc_filter (list[tensor]): Use loc filter or not.\n\n        Returns:\n            tuple: guided anchors, location masks\n        \"\"\"\n        # calculate location filtering mask\n        loc_pred = loc_pred.sigmoid().detach()\n        if use_loc_filter:\n            loc_mask = loc_pred >= self.loc_filter_thr\n        else:\n            loc_mask = loc_pred >= 0.0\n        mask = loc_mask.permute(1, 2, 0).expand(-1, -1, self.num_base_priors)\n        mask = mask.contiguous().view(-1)\n        # calculate guided anchors\n        squares = squares[mask]\n        anchor_deltas = shape_pred.permute(1, 2, 0).contiguous().view(\n            -1, 2).detach()[mask]\n        bbox_deltas = anchor_deltas.new_full(squares.size(), 0)\n        bbox_deltas[:, 2:] = anchor_deltas\n        guided_anchors = self.anchor_coder.decode(\n            squares, bbox_deltas, wh_ratio_clip=1e-6)\n        return guided_anchors, mask\n\n    def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):\n        \"\"\"Compute location targets for guided anchoring.\n\n        Each feature map is divided into positive, negative and ignore regions.\n        - positive regions: target 1, weight 1\n        - ignore regions: target 0, weight 0\n        - negative regions: target 0, weight 0.1\n\n        Args:\n            gt_bboxes_list (list[Tensor]): Gt bboxes of each image.\n            featmap_sizes (list[tuple]): Multi level sizes of each feature\n                maps.\n\n        Returns:\n            tuple\n        \"\"\"\n        anchor_scale = self.approx_anchor_generator.octave_base_scale\n        anchor_strides = self.approx_anchor_generator.strides\n        # Currently only supports same stride in x and y direction.\n        for stride in anchor_strides:\n            assert (stride[0] == stride[1])\n        anchor_strides = [stride[0] for stride in anchor_strides]\n\n        center_ratio = self.train_cfg.center_ratio\n        ignore_ratio = self.train_cfg.ignore_ratio\n        img_per_gpu = len(gt_bboxes_list)\n        num_lvls = len(featmap_sizes)\n        r1 = (1 - center_ratio) / 2\n        r2 = (1 - ignore_ratio) / 2\n        all_loc_targets = []\n        all_loc_weights = []\n        all_ignore_map = []\n        for lvl_id in 
range(num_lvls):\n            h, w = featmap_sizes[lvl_id]\n            loc_targets = torch.zeros(\n                img_per_gpu,\n                1,\n                h,\n                w,\n                device=gt_bboxes_list[0].device,\n                dtype=torch.float32)\n            loc_weights = torch.full_like(loc_targets, -1)\n            ignore_map = torch.zeros_like(loc_targets)\n            all_loc_targets.append(loc_targets)\n            all_loc_weights.append(loc_weights)\n            all_ignore_map.append(ignore_map)\n        for img_id in range(img_per_gpu):\n            gt_bboxes = gt_bboxes_list[img_id]\n            scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                               (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n            min_anchor_size = scale.new_full(\n                (1, ), float(anchor_scale * anchor_strides[0]))\n            # assign gt bboxes to different feature levels w.r.t. their scales\n            target_lvls = torch.floor(\n                torch.log2(scale) - torch.log2(min_anchor_size) + 0.5)\n            target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long()\n            for gt_id in range(gt_bboxes.size(0)):\n                lvl = target_lvls[gt_id].item()\n                # rescaled to corresponding feature map\n                gt_ = gt_bboxes[gt_id, :4] / anchor_strides[lvl]\n                # calculate ignore regions\n                ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                    gt_, r2, featmap_sizes[lvl])\n                # calculate positive (center) regions\n                ctr_x1, ctr_y1, ctr_x2, ctr_y2 = calc_region(\n                    gt_, r1, featmap_sizes[lvl])\n                all_loc_targets[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,\n                                     ctr_x1:ctr_x2 + 1] = 1\n                all_loc_weights[lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                     ignore_x1:ignore_x2 + 1] = 0\n                all_loc_weights[lvl][img_id, 0, ctr_y1:ctr_y2 + 1,\n                                     ctr_x1:ctr_x2 + 1] = 1\n                # calculate ignore map on nearby low level feature\n                if lvl > 0:\n                    d_lvl = lvl - 1\n                    # rescaled to corresponding feature map\n                    gt_ = gt_bboxes[gt_id, :4] / anchor_strides[d_lvl]\n                    ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                        gt_, r2, featmap_sizes[d_lvl])\n                    all_ignore_map[d_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                          ignore_x1:ignore_x2 + 1] = 1\n                # calculate ignore map on nearby high level feature\n                if lvl < num_lvls - 1:\n                    u_lvl = lvl + 1\n                    # rescaled to corresponding feature map\n                    gt_ = gt_bboxes[gt_id, :4] / anchor_strides[u_lvl]\n                    ignore_x1, ignore_y1, ignore_x2, ignore_y2 = calc_region(\n                        gt_, r2, featmap_sizes[u_lvl])\n                    all_ignore_map[u_lvl][img_id, 0, ignore_y1:ignore_y2 + 1,\n                                          ignore_x1:ignore_x2 + 1] = 1\n        for lvl_id in range(num_lvls):\n            # ignore negative regions w.r.t. 
ignore map\n            all_loc_weights[lvl_id][(all_loc_weights[lvl_id] < 0)\n                                    & (all_ignore_map[lvl_id] > 0)] = 0\n            # set negative regions with weight 0.1\n            all_loc_weights[lvl_id][all_loc_weights[lvl_id] < 0] = 0.1\n        # loc average factor to balance loss\n        loc_avg_factor = sum(\n            [t.size(0) * t.size(-1) * t.size(-2)\n             for t in all_loc_targets]) / 200\n        return all_loc_targets, all_loc_weights, loc_avg_factor\n\n    def _ga_shape_target_single(self,\n                                flat_approxs,\n                                inside_flags,\n                                flat_squares,\n                                gt_bboxes,\n                                gt_bboxes_ignore,\n                                img_meta,\n                                unmap_outputs=True):\n        \"\"\"Compute guided anchoring targets.\n\n        This function returns sampled anchors and gt bboxes directly\n        rather than calculates regression targets.\n\n        Args:\n            flat_approxs (Tensor): flat approxs of a single image,\n                shape (n, 4)\n            inside_flags (Tensor): inside flags of a single image,\n                shape (n, ).\n            flat_squares (Tensor): flat squares of a single image,\n                shape (approxs_per_octave * n, 4)\n            gt_bboxes (Tensor): Ground truth bboxes of a single image.\n            img_meta (dict): Meta info of a single image.\n            approxs_per_octave (int): number of approxs per octave\n            cfg (dict): RPN train configs.\n            unmap_outputs (bool): unmap outputs or not.\n\n        Returns:\n            tuple\n        \"\"\"\n        if not inside_flags.any():\n            return (None, ) * 5\n        # assign gt and sample anchors\n        expand_inside_flags = inside_flags[:, None].expand(\n            -1, self.approxs_per_octave).reshape(-1)\n        approxs = flat_approxs[expand_inside_flags, :]\n        squares = flat_squares[inside_flags, :]\n\n        assign_result = self.ga_assigner.assign(approxs, squares,\n                                                self.approxs_per_octave,\n                                                gt_bboxes, gt_bboxes_ignore)\n        sampling_result = self.ga_sampler.sample(assign_result, squares,\n                                                 gt_bboxes)\n\n        bbox_anchors = torch.zeros_like(squares)\n        bbox_gts = torch.zeros_like(squares)\n        bbox_weights = torch.zeros_like(squares)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            bbox_anchors[pos_inds, :] = sampling_result.pos_bboxes\n            bbox_gts[pos_inds, :] = sampling_result.pos_gt_bboxes\n            bbox_weights[pos_inds, :] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_squares.size(0)\n            bbox_anchors = unmap(bbox_anchors, num_total_anchors, inside_flags)\n            bbox_gts = unmap(bbox_gts, num_total_anchors, inside_flags)\n            bbox_weights = unmap(bbox_weights, num_total_anchors, inside_flags)\n\n        return (bbox_anchors, bbox_gts, bbox_weights, pos_inds, neg_inds)\n\n    def ga_shape_targets(self,\n                         approx_list,\n                         inside_flag_list,\n                         square_list,\n                         gt_bboxes_list,\n                         
img_metas,\n                         gt_bboxes_ignore_list=None,\n                         unmap_outputs=True):\n        \"\"\"Compute guided anchoring targets.\n\n        Args:\n            approx_list (list[list]): Multi level approxs of each image.\n            inside_flag_list (list[list]): Multi level inside flags of each\n                image.\n            square_list (list[list]): Multi level squares of each image.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.\n            unmap_outputs (bool): unmap outputs or not.\n\n        Returns:\n            tuple\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(approx_list) == len(inside_flag_list) == len(\n            square_list) == num_imgs\n        # anchor number of multi levels\n        num_level_squares = [squares.size(0) for squares in square_list[0]]\n        # concat all level anchors and flags to a single tensor\n        inside_flag_flat_list = []\n        approx_flat_list = []\n        square_flat_list = []\n        for i in range(num_imgs):\n            assert len(square_list[i]) == len(inside_flag_list[i])\n            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))\n            approx_flat_list.append(torch.cat(approx_list[i]))\n            square_flat_list.append(torch.cat(square_list[i]))\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        (all_bbox_anchors, all_bbox_gts, all_bbox_weights, pos_inds_list,\n         neg_inds_list) = multi_apply(\n             self._ga_shape_target_single,\n             approx_flat_list,\n             inside_flag_flat_list,\n             square_flat_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             img_metas,\n             unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([bbox_anchors is None for bbox_anchors in all_bbox_anchors]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. 
multiple levels\n        bbox_anchors_list = images_to_levels(all_bbox_anchors,\n                                             num_level_squares)\n        bbox_gts_list = images_to_levels(all_bbox_gts, num_level_squares)\n        bbox_weights_list = images_to_levels(all_bbox_weights,\n                                             num_level_squares)\n        return (bbox_anchors_list, bbox_gts_list, bbox_weights_list,\n                num_total_pos, num_total_neg)\n\n    def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,\n                          anchor_weights, anchor_total_num):\n        shape_pred = shape_pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)\n        bbox_anchors = bbox_anchors.contiguous().view(-1, 4)\n        bbox_gts = bbox_gts.contiguous().view(-1, 4)\n        anchor_weights = anchor_weights.contiguous().view(-1, 4)\n        bbox_deltas = bbox_anchors.new_full(bbox_anchors.size(), 0)\n        bbox_deltas[:, 2:] += shape_pred\n        # filter out negative samples to speed-up weighted_bounded_iou_loss\n        inds = torch.nonzero(\n            anchor_weights[:, 0] > 0, as_tuple=False).squeeze(1)\n        bbox_deltas_ = bbox_deltas[inds]\n        bbox_anchors_ = bbox_anchors[inds]\n        bbox_gts_ = bbox_gts[inds]\n        anchor_weights_ = anchor_weights[inds]\n        pred_anchors_ = self.anchor_coder.decode(\n            bbox_anchors_, bbox_deltas_, wh_ratio_clip=1e-6)\n        loss_shape = self.loss_shape(\n            pred_anchors_,\n            bbox_gts_,\n            anchor_weights_,\n            avg_factor=anchor_total_num)\n        return loss_shape\n\n    def loss_loc_single(self, loc_pred, loc_target, loc_weight,\n                        loc_avg_factor):\n        loss_loc = self.loss_loc(\n            loc_pred.reshape(-1, 1),\n            loc_target.reshape(-1).long(),\n            loc_weight.reshape(-1),\n            avg_factor=loc_avg_factor)\n        return loss_loc\n\n    @force_fp32(\n        apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             shape_preds,\n             loc_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.approx_anchor_generator.num_levels\n\n        device = cls_scores[0].device\n\n        # get loc targets\n        loc_targets, loc_weights, loc_avg_factor = self.ga_loc_targets(\n            gt_bboxes, featmap_sizes)\n\n        # get sampled approxes\n        approxs_list, inside_flag_list = self.get_sampled_approxs(\n            featmap_sizes, img_metas, device=device)\n        # get squares and guided anchors\n        squares_list, guided_anchors_list, _ = self.get_anchors(\n            featmap_sizes, shape_preds, loc_preds, img_metas, device=device)\n\n        # get shape targets\n        shape_targets = self.ga_shape_targets(approxs_list, inside_flag_list,\n                                              squares_list, gt_bboxes,\n                                              img_metas)\n        if shape_targets is None:\n            return None\n        (bbox_anchors_list, bbox_gts_list, anchor_weights_list, anchor_fg_num,\n         anchor_bg_num) = shape_targets\n        anchor_total_num = (\n            anchor_fg_num if not self.ga_sampling else anchor_fg_num +\n            anchor_bg_num)\n\n        # get anchor targets\n        
label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            guided_anchors_list,\n            inside_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        num_total_samples = (\n            num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n        # anchor number of multi levels\n        num_level_anchors = [\n            anchors.size(0) for anchors in guided_anchors_list[0]\n        ]\n        # concat all level anchors to a single tensor\n        concat_anchor_list = []\n        for i in range(len(guided_anchors_list)):\n            concat_anchor_list.append(torch.cat(guided_anchors_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        # get classification and bbox regression losses\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_single,\n            cls_scores,\n            bbox_preds,\n            all_anchor_list,\n            labels_list,\n            label_weights_list,\n            bbox_targets_list,\n            bbox_weights_list,\n            num_total_samples=num_total_samples)\n\n        # get anchor location loss\n        losses_loc = []\n        for i in range(len(loc_preds)):\n            loss_loc = self.loss_loc_single(\n                loc_preds[i],\n                loc_targets[i],\n                loc_weights[i],\n                loc_avg_factor=loc_avg_factor)\n            losses_loc.append(loss_loc)\n\n        # get anchor shape loss\n        losses_shape = []\n        for i in range(len(shape_preds)):\n            loss_shape = self.loss_shape_single(\n                shape_preds[i],\n                bbox_anchors_list[i],\n                bbox_gts_list[i],\n                anchor_weights_list[i],\n                anchor_total_num=anchor_total_num)\n            losses_shape.append(loss_shape)\n\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_shape=losses_shape,\n            loss_loc=losses_loc)\n\n    @force_fp32(\n        apply_to=('cls_scores', 'bbox_preds', 'shape_preds', 'loc_preds'))\n    def get_bboxes(self,\n                   cls_scores,\n                   bbox_preds,\n                   shape_preds,\n                   loc_preds,\n                   img_metas,\n                   cfg=None,\n                   rescale=False):\n        assert len(cls_scores) == len(bbox_preds) == len(shape_preds) == len(\n            loc_preds)\n        num_levels = len(cls_scores)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        device = cls_scores[0].device\n        # get guided anchors\n        _, guided_anchors, loc_masks = self.get_anchors(\n            featmap_sizes,\n            shape_preds,\n            loc_preds,\n            img_metas,\n            use_loc_filter=not self.training,\n            device=device)\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_score_list = [\n                cls_scores[i][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_pred_list = [\n  
              bbox_preds[i][img_id].detach() for i in range(num_levels)\n            ]\n            guided_anchor_list = [\n                guided_anchors[img_id][i].detach() for i in range(num_levels)\n            ]\n            loc_mask_list = [\n                loc_masks[img_id][i].detach() for i in range(num_levels)\n            ]\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            proposals = self._get_bboxes_single(cls_score_list, bbox_pred_list,\n                                                guided_anchor_list,\n                                                loc_mask_list, img_shape,\n                                                scale_factor, cfg, rescale)\n            result_list.append(proposals)\n        return result_list\n\n    def _get_bboxes_single(self,\n                           cls_scores,\n                           bbox_preds,\n                           mlvl_anchors,\n                           mlvl_masks,\n                           img_shape,\n                           scale_factor,\n                           cfg,\n                           rescale=False):\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_scores) == len(bbox_preds) == len(mlvl_anchors)\n        mlvl_bboxes = []\n        mlvl_scores = []\n        for cls_score, bbox_pred, anchors, mask in zip(cls_scores, bbox_preds,\n                                                       mlvl_anchors,\n                                                       mlvl_masks):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            # if no location is kept, end.\n            if mask.sum() == 0:\n                continue\n            # reshape scores and bbox_pred\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            # filter scores, bbox_pred w.r.t. mask.\n            # anchors are filtered in get_anchors() beforehand.\n            scores = scores[mask, :]\n            bbox_pred = bbox_pred[mask, :]\n            if scores.dim() == 0:\n                anchors = anchors.unsqueeze(0)\n                scores = scores.unsqueeze(0)\n                bbox_pred = bbox_pred.unsqueeze(0)\n            # filter anchors, bbox_pred, scores w.r.t. 
scores\n            nms_pre = cfg.get('nms_pre', -1)\n            if nms_pre > 0 and scores.shape[0] > nms_pre:\n                if self.use_sigmoid_cls:\n                    max_scores, _ = scores.max(dim=1)\n                else:\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    max_scores, _ = scores[:, :-1].max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                anchors = anchors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n            bboxes = self.bbox_coder.decode(\n                anchors, bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n        mlvl_bboxes = torch.cat(mlvl_bboxes)\n        if rescale:\n            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n        mlvl_scores = torch.cat(mlvl_scores)\n        if self.use_sigmoid_cls:\n            # Add a dummy background class to the backend when using sigmoid\n            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n            # BG cat_id: num_class\n            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n        # multi class NMS\n        det_bboxes, det_labels = multiclass_nms(mlvl_bboxes, mlvl_scores,\n                                                cfg.score_thr, cfg.nms,\n                                                cfg.max_per_img)\n        return det_bboxes, det_labels\n"
  },
  {
    "path": "mmdet/models/dense_heads/lad_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import bbox_overlaps, multi_apply\nfrom ..builder import HEADS\nfrom .paa_head import PAAHead, levels_to_images\n\n\n@HEADS.register_module()\nclass LADHead(PAAHead):\n    \"\"\"Label Assignment Head from the paper: `Improving Object Detection by\n    Label Assignment Distillation <https://arxiv.org/pdf/2108.10520.pdf>`_\"\"\"\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))\n    def get_label_assignment(self,\n                             cls_scores,\n                             bbox_preds,\n                             iou_preds,\n                             gt_bboxes,\n                             gt_labels,\n                             img_metas,\n                             gt_bboxes_ignore=None):\n        \"\"\"Get label assignment (from teacher).\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level.\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when are computing the loss.\n\n        Returns:\n            tuple: Returns a tuple containing label assignment variables.\n\n                - labels (Tensor): Labels of all anchors, each with\n                    shape (num_anchors,).\n                - labels_weight (Tensor): Label weights of all anchor.\n                    each with shape (num_anchors,).\n                - bboxes_target (Tensor): BBox targets of all anchors.\n                    each with shape (num_anchors, 4).\n                - bboxes_weight (Tensor): BBox weights of all anchors.\n                    each with shape (num_anchors, 4).\n                - pos_inds_flatten (Tensor): Contains all index of positive\n                    sample in all anchor.\n                - pos_anchors (Tensor): Positive anchors.\n                - num_pos (int): Number of positive anchors.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n        )\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,\n         pos_gt_index) = cls_reg_targets\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n        
    item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,\n                                       cls_scores, bbox_preds, labels,\n                                       labels_weight, bboxes_target,\n                                       bboxes_weight, pos_inds)\n\n        with torch.no_grad():\n            reassign_labels, reassign_label_weight, \\\n                reassign_bbox_weights, num_pos = multi_apply(\n                    self.paa_reassign,\n                    pos_losses_list,\n                    labels,\n                    labels_weight,\n                    bboxes_weight,\n                    pos_inds,\n                    pos_gt_index,\n                    anchor_list)\n            num_pos = sum(num_pos)\n        # convert all tensor list to a flatten tensor\n        labels = torch.cat(reassign_labels, 0).view(-1)\n        flatten_anchors = torch.cat(\n            [torch.cat(item, 0) for item in anchor_list])\n        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)\n        bboxes_target = torch.cat(bboxes_target,\n                                  0).view(-1, bboxes_target[0].size(-1))\n\n        pos_inds_flatten = ((labels >= 0)\n                            &\n                            (labels < self.num_classes)).nonzero().reshape(-1)\n\n        if num_pos:\n            pos_anchors = flatten_anchors[pos_inds_flatten]\n        else:\n            pos_anchors = None\n\n        label_assignment_results = (labels, labels_weight, bboxes_target,\n                                    bboxes_weight, pos_inds_flatten,\n                                    pos_anchors, num_pos)\n        return label_assignment_results\n\n    def forward_train(self,\n                      x,\n                      label_assignment_results,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels=None,\n                      gt_bboxes_ignore=None,\n                      **kwargs):\n        \"\"\"Forward train with the available label assignment (student receives\n        from teacher).\n\n        Args:\n            x (list[Tensor]): Features from FPN.\n            label_assignment_results (tuple): As the outputs defined in the\n                function `self.get_label_assignment`.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n\n        Returns:\n            losses: (dict[str, Tensor]): A dictionary of loss components.\n        \"\"\"\n        outs = self(x)\n        if gt_labels is None:\n            loss_inputs = outs + (gt_bboxes, img_metas)\n        else:\n            loss_inputs = outs + (gt_bboxes, gt_labels, img_metas)\n        losses = self.loss(\n            *loss_inputs,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            label_assignment_results=label_assignment_results)\n        return losses\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))\n    def loss(self,\n             cls_scores,\n            
 bbox_preds,\n             iou_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None,\n             label_assignment_results=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when are computing the loss.\n            label_assignment_results (tuple): As the outputs defined in the\n                function `self.get_label_assignment`.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss gmm_assignment.\n        \"\"\"\n\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds_flatten,\n         pos_anchors, num_pos) = label_assignment_results\n\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n            item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        iou_preds = levels_to_images(iou_preds)\n        iou_preds = [item.reshape(-1, 1) for item in iou_preds]\n\n        # convert all tensor list to a flatten tensor\n        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))\n        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))\n        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))\n\n        losses_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            labels_weight,\n            avg_factor=max(num_pos, len(img_metas)))  # avoid num_pos=0\n        if num_pos:\n            pos_bbox_pred = self.bbox_coder.decode(\n                pos_anchors, bbox_preds[pos_inds_flatten])\n            pos_bbox_target = bboxes_target[pos_inds_flatten]\n            iou_target = bbox_overlaps(\n                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)\n            losses_iou = self.loss_centerness(\n                iou_preds[pos_inds_flatten],\n                iou_target.unsqueeze(-1),\n                avg_factor=num_pos)\n            losses_bbox = self.loss_bbox(\n                pos_bbox_pred, pos_bbox_target, avg_factor=num_pos)\n\n        else:\n            losses_iou = iou_preds.sum() * 0\n            losses_bbox = bbox_preds.sum() * 0\n\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)\n"
  },
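The `loss` method of the PAA head above turns its IoU branch into a box-quality estimator: the branch's target is the aligned overlap between decoded positive boxes and their regression targets, and when an image has no positives the head falls back to zero losses that stay connected to the graph. Below is a minimal stand-alone sketch of those two patterns; it re-implements the aligned IoU in plain PyTorch instead of calling `mmdet.core.bbox_overlaps`, so treat it as an illustration rather than the library routine.

```python
# Minimal sketch (not the mmdet implementation) of two patterns used by
# PAAHead.loss() above: the IoU branch is supervised with the aligned overlap
# between decoded positive boxes and their targets, and with zero positives
# the losses stay tied to the predictions so every parameter gets a gradient.
import torch


def aligned_iou(boxes1, boxes2):
    """IoU between boxes1[i] and boxes2[i]; boxes are (x1, y1, x2, y2)."""
    lt = torch.max(boxes1[:, :2], boxes2[:, :2])   # intersection top-left
    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])   # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[:, 0] * wh[:, 1]
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    return inter / (area1 + area2 - inter).clamp(min=1e-6)


pos_bbox_pred = torch.tensor([[0., 0., 10., 10.], [5., 5., 15., 15.]])
pos_bbox_target = torch.tensor([[0., 0., 10., 10.], [6., 6., 15., 15.]])
iou_target = aligned_iou(pos_bbox_pred.detach(), pos_bbox_target)  # shape (2,)

# zero-positive fallback: a zero loss that is still part of the graph
iou_preds = torch.randn(8, 1, requires_grad=True)
losses_iou = iou_preds.sum() * 0
```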
  {
    "path": "mmdet/models/dense_heads/ld_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import bbox_overlaps, multi_apply, reduce_mean\nfrom ..builder import HEADS, build_loss\nfrom .gfl_head import GFLHead\n\n\n@HEADS.register_module()\nclass LDHead(GFLHead):\n    \"\"\"Localization distillation Head. (Short description)\n\n    It utilizes the learned bbox distributions to transfer the localization\n    dark knowledge from teacher to student. Original paper: `Localization\n    Distillation for Object Detection. <https://arxiv.org/abs/2102.12252>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        loss_ld (dict): Config of Localization Distillation Loss (LD),\n            T is the temperature for distillation.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 loss_ld=dict(\n                     type='LocalizationDistillationLoss',\n                     loss_weight=0.25,\n                     T=10),\n                 **kwargs):\n\n        super(LDHead, self).__init__(num_classes, in_channels, **kwargs)\n        self.loss_ld = build_loss(loss_ld)\n\n    def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,\n                    bbox_targets, stride, soft_targets, num_total_samples):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Cls and quality joint scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_pred (Tensor): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors)\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                weight shape (N, num_total_anchors, 4).\n            stride (tuple): Stride in this scale level.\n            num_total_samples (int): Number of positive samples that is\n                reduced over all GPUs.\n\n        Returns:\n            dict[tuple, Tensor]: Loss components and weight targets.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        bbox_pred = bbox_pred.permute(0, 2, 3,\n                                      1).reshape(-1, 4 * (self.reg_max + 1))\n        soft_targets = soft_targets.permute(0, 2, 3,\n                                            1).reshape(-1,\n                                                       4 * (self.reg_max + 1))\n\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n        score = 
label_weights.new_zeros(labels.shape)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n            pos_anchor_centers = self.anchor_center(pos_anchors) / stride[0]\n\n            weight_targets = cls_score.detach().sigmoid()\n            weight_targets = weight_targets.max(dim=1)[0][pos_inds]\n            pos_bbox_pred_corners = self.integral(pos_bbox_pred)\n            pos_decode_bbox_pred = self.bbox_coder.decode(\n                pos_anchor_centers, pos_bbox_pred_corners)\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n            score[pos_inds] = bbox_overlaps(\n                pos_decode_bbox_pred.detach(),\n                pos_decode_bbox_targets,\n                is_aligned=True)\n            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\n            pos_soft_targets = soft_targets[pos_inds]\n            soft_corners = pos_soft_targets.reshape(-1, self.reg_max + 1)\n\n            target_corners = self.bbox_coder.encode(pos_anchor_centers,\n                                                    pos_decode_bbox_targets,\n                                                    self.reg_max).reshape(-1)\n\n            # regression loss\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=weight_targets,\n                avg_factor=1.0)\n\n            # dfl loss\n            loss_dfl = self.loss_dfl(\n                pred_corners,\n                target_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n\n            # ld loss\n            loss_ld = self.loss_ld(\n                pred_corners,\n                soft_corners,\n                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\n                avg_factor=4.0)\n\n        else:\n            loss_ld = bbox_pred.sum() * 0\n            loss_bbox = bbox_pred.sum() * 0\n            loss_dfl = bbox_pred.sum() * 0\n            weight_targets = bbox_pred.new_tensor(0)\n\n        # cls (qfl) loss\n        loss_cls = self.loss_cls(\n            cls_score, (labels, score),\n            weight=label_weights,\n            avg_factor=num_total_samples)\n\n        return loss_cls, loss_bbox, loss_dfl, loss_ld, weight_targets.sum()\n\n    def forward_train(self,\n                      x,\n                      out_teacher,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels=None,\n                      gt_bboxes_ignore=None,\n                      proposal_cfg=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            x (list[Tensor]): Features from FPN.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            proposal_cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used\n\n        Returns:\n            tuple[dict, list]: The loss components and proposals of each image.\n\n            - losses 
(dict[str, Tensor]): A dictionary of loss components.\n            - proposal_list (list[Tensor]): Proposals of each image.\n        \"\"\"\n        outs = self(x)\n        soft_target = out_teacher[1]\n        if gt_labels is None:\n            loss_inputs = outs + (gt_bboxes, soft_target, img_metas)\n        else:\n            loss_inputs = outs + (gt_bboxes, gt_labels, soft_target, img_metas)\n        losses = self.loss(*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n        if proposal_cfg is None:\n            return losses\n        else:\n            proposal_list = self.get_bboxes(*outs, img_metas, cfg=proposal_cfg)\n            return losses, proposal_list\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             soft_target,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Cls and quality scores for each scale\n                level has shape (N, num_classes, H, W).\n            bbox_preds (list[Tensor]): Box distribution logits for each scale\n                level with shape (N, 4*(n+1), H, W), n is max value of integral\n                set.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n\n        num_total_samples = reduce_mean(\n            torch.tensor(num_total_pos, dtype=torch.float,\n                         device=device)).item()\n        num_total_samples = max(num_total_samples, 1.0)\n\n        losses_cls, losses_bbox, losses_dfl, losses_ld, \\\n            avg_factor = multi_apply(\n                self.loss_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                self.prior_generator.strides,\n                soft_target,\n                num_total_samples=num_total_samples)\n\n        avg_factor = sum(avg_factor) + 1e-6\n        avg_factor = 
reduce_mean(avg_factor).item()\n        losses_bbox = [x / avg_factor for x in losses_bbox]\n        losses_dfl = [x / avg_factor for x in losses_dfl]\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox=losses_bbox,\n            loss_dfl=losses_dfl,\n            loss_ld=losses_ld)\n"
  },
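`LDHead` above only configures the distillation term (`LocalizationDistillationLoss` with temperature `T=10` and `loss_weight=0.25`) and feeds it the student's and teacher's per-corner distribution logits reshaped to `(num_pos * 4, reg_max + 1)`. The snippet below is a rough sketch, under the usual knowledge-distillation formulation, of what such a temperature-scaled KL term looks like; the exact reduction and weighting inside mmdet's loss module may differ.

```python
# Rough sketch of a temperature-scaled localization-distillation term for the
# per-corner distributions used by LDHead; the real mmdet
# LocalizationDistillationLoss may differ in weighting/reduction details.
import torch
import torch.nn.functional as F


def ld_loss(pred_corners, soft_corners, weight, T=10.0, loss_weight=0.25):
    """KL(teacher || student) on softened corner distributions.

    pred_corners, soft_corners: (num_pos * 4, reg_max + 1) logits from the
    student and the teacher; weight: (num_pos * 4,) per-corner weights.
    """
    student = F.log_softmax(pred_corners / T, dim=1)
    teacher = F.softmax(soft_corners / T, dim=1)
    kd = F.kl_div(student, teacher, reduction='none').sum(dim=1) * (T * T)
    # avg_factor=4.0 mirrors the call in loss_single above
    return loss_weight * (kd * weight).sum() / 4.0


reg_max = 16
pred = torch.randn(12, reg_max + 1, requires_grad=True)   # student logits
soft = torch.randn(12, reg_max + 1)                        # teacher logits
w = torch.ones(12)
print(ld_loss(pred, soft, w))
```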
  {
    "path": "mmdet/models/dense_heads/mask2former_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init\nfrom mmcv.cnn.bricks.transformer import (build_positional_encoding,\n                                         build_transformer_layer_sequence)\nfrom mmcv.ops import point_sample\nfrom mmcv.runner import ModuleList\n\nfrom mmdet.core import build_assigner, build_sampler, reduce_mean\nfrom mmdet.models.utils import get_uncertain_point_coords_with_randomness\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\nfrom .maskformer_head import MaskFormerHead\n\n\n@HEADS.register_module()\nclass Mask2FormerHead(MaskFormerHead):\n    \"\"\"Implements the Mask2Former head.\n\n    See `Masked-attention Mask Transformer for Universal Image\n    Segmentation <https://arxiv.org/pdf/2112.01527>`_ for details.\n\n    Args:\n        in_channels (list[int]): Number of channels in the input feature map.\n        feat_channels (int): Number of channels for features.\n        out_channels (int): Number of channels for output.\n        num_things_classes (int): Number of things.\n        num_stuff_classes (int): Number of stuff.\n        num_queries (int): Number of query in Transformer decoder.\n        pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel\n            decoder. Defaults to None.\n        enforce_decoder_input_project (bool, optional): Whether to add\n            a layer to change the embed_dim of tranformer encoder in\n            pixel decoder to the embed_dim of transformer decoder.\n            Defaults to False.\n        transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer decoder. Defaults to None.\n        positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer decoder position encoding. Defaults to None.\n        loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification\n            loss. 
Defaults to None.\n        loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.\n            Defaults to None.\n        loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.\n            Defaults to None.\n        train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of\n            Mask2Former head.\n        test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of\n            Mask2Former head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 feat_channels,\n                 out_channels,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 num_queries=100,\n                 num_transformer_feat_level=3,\n                 pixel_decoder=None,\n                 enforce_decoder_input_project=False,\n                 transformer_decoder=None,\n                 positional_encoding=None,\n                 loss_cls=None,\n                 loss_mask=None,\n                 loss_dice=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(AnchorFreeHead, self).__init__(init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = self.num_things_classes + self.num_stuff_classes\n        self.num_queries = num_queries\n        self.num_transformer_feat_level = num_transformer_feat_level\n        self.num_heads = transformer_decoder.transformerlayers.\\\n            attn_cfgs.num_heads\n        self.num_transformer_decoder_layers = transformer_decoder.num_layers\n        assert pixel_decoder.encoder.transformerlayers.\\\n            attn_cfgs.num_levels == num_transformer_feat_level\n        pixel_decoder_ = copy.deepcopy(pixel_decoder)\n        pixel_decoder_.update(\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            out_channels=out_channels)\n        self.pixel_decoder = build_plugin_layer(pixel_decoder_)[1]\n        self.transformer_decoder = build_transformer_layer_sequence(\n            transformer_decoder)\n        self.decoder_embed_dims = self.transformer_decoder.embed_dims\n\n        self.decoder_input_projs = ModuleList()\n        # from low resolution to high resolution\n        for _ in range(num_transformer_feat_level):\n            if (self.decoder_embed_dims != feat_channels\n                    or enforce_decoder_input_project):\n                self.decoder_input_projs.append(\n                    Conv2d(\n                        feat_channels, self.decoder_embed_dims, kernel_size=1))\n            else:\n                self.decoder_input_projs.append(nn.Identity())\n        self.decoder_positional_encoding = build_positional_encoding(\n            positional_encoding)\n        self.query_embed = nn.Embedding(self.num_queries, feat_channels)\n        self.query_feat = nn.Embedding(self.num_queries, feat_channels)\n        # from low resolution to high resolution\n        self.level_embed = nn.Embedding(self.num_transformer_feat_level,\n                                        feat_channels)\n\n        self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)\n        self.mask_embed = nn.Sequential(\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, feat_channels), 
nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, out_channels))\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n        if train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            self.sampler = build_sampler(self.train_cfg.sampler, context=self)\n            self.num_points = self.train_cfg.get('num_points', 12544)\n            self.oversample_ratio = self.train_cfg.get('oversample_ratio', 3.0)\n            self.importance_sample_ratio = self.train_cfg.get(\n                'importance_sample_ratio', 0.75)\n\n        self.class_weight = loss_cls.class_weight\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_mask = build_loss(loss_mask)\n        self.loss_dice = build_loss(loss_dice)\n\n    def init_weights(self):\n        for m in self.decoder_input_projs:\n            if isinstance(m, Conv2d):\n                caffe2_xavier_init(m, bias=0)\n\n        self.pixel_decoder.init_weights()\n\n        for p in self.transformer_decoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_normal_(p)\n\n    def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,\n                           img_metas):\n        \"\"\"Compute classification and mask targets for one image.\n\n        Args:\n            cls_score (Tensor): Mask score logits from a single decoder layer\n                for one image. Shape (num_queries, cls_out_channels).\n            mask_pred (Tensor): Mask logits for a single decoder layer for one\n                image. Shape (num_queries, h, w).\n            gt_labels (Tensor): Ground truth class indices for one image with\n                shape (num_gts, ).\n            gt_masks (Tensor): Ground truth mask for each image, each with\n                shape (num_gts, h, w).\n            img_metas (dict): Image informtation.\n\n        Returns:\n            tuple[Tensor]: A tuple containing the following for one image.\n\n                - labels (Tensor): Labels of each image. \\\n                    shape (num_queries, ).\n                - label_weights (Tensor): Label weights of each image. \\\n                    shape (num_queries, ).\n                - mask_targets (Tensor): Mask targets of each image. \\\n                    shape (num_queries, h, w).\n                - mask_weights (Tensor): Mask weights of each image. 
\\\n                    shape (num_queries, ).\n                - pos_inds (Tensor): Sampled positive indices for each \\\n                    image.\n                - neg_inds (Tensor): Sampled negative indices for each \\\n                    image.\n        \"\"\"\n        # sample points\n        num_queries = cls_score.shape[0]\n        num_gts = gt_labels.shape[0]\n\n        point_coords = torch.rand((1, self.num_points, 2),\n                                  device=cls_score.device)\n        # shape (num_queries, num_points)\n        mask_points_pred = point_sample(\n            mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1,\n                                                        1)).squeeze(1)\n        # shape (num_gts, num_points)\n        gt_points_masks = point_sample(\n            gt_masks.unsqueeze(1).float(), point_coords.repeat(num_gts, 1,\n                                                               1)).squeeze(1)\n\n        # assign and sample\n        assign_result = self.assigner.assign(cls_score, mask_points_pred,\n                                             gt_labels, gt_points_masks,\n                                             img_metas)\n        sampling_result = self.sampler.sample(assign_result, mask_pred,\n                                              gt_masks)\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        # label target\n        labels = gt_labels.new_full((self.num_queries, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n        label_weights = gt_labels.new_ones((self.num_queries, ))\n\n        # mask target\n        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]\n        mask_weights = mask_pred.new_zeros((self.num_queries, ))\n        mask_weights[pos_inds] = 1.0\n\n        return (labels, label_weights, mask_targets, mask_weights, pos_inds,\n                neg_inds)\n\n    def loss_single(self, cls_scores, mask_preds, gt_labels_list,\n                    gt_masks_list, img_metas):\n        \"\"\"Loss function for outputs from a single decoder layer.\n\n        Args:\n            cls_scores (Tensor): Mask score logits from a single decoder layer\n                for all images. Shape (batch_size, num_queries,\n                cls_out_channels). Note `cls_out_channels` should includes\n                background.\n            mask_preds (Tensor): Mask logits for a pixel decoder for all\n                images. 
Shape (batch_size, num_queries, h, w).\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image, each with shape (num_gts, ).\n            gt_masks_list (list[Tensor]): Ground truth mask for each image,\n                each with shape (num_gts, h, w).\n            img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple[Tensor]: Loss components for outputs from a single \\\n                decoder layer.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         num_total_pos,\n         num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list,\n                                           gt_labels_list, gt_masks_list,\n                                           img_metas)\n        # shape (batch_size, num_queries)\n        labels = torch.stack(labels_list, dim=0)\n        # shape (batch_size, num_queries)\n        label_weights = torch.stack(label_weights_list, dim=0)\n        # shape (num_total_gts, h, w)\n        mask_targets = torch.cat(mask_targets_list, dim=0)\n        # shape (batch_size, num_queries)\n        mask_weights = torch.stack(mask_weights_list, dim=0)\n\n        # classfication loss\n        # shape (batch_size * num_queries, )\n        cls_scores = cls_scores.flatten(0, 1)\n        labels = labels.flatten(0, 1)\n        label_weights = label_weights.flatten(0, 1)\n\n        class_weight = cls_scores.new_tensor(self.class_weight)\n        loss_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            label_weights,\n            avg_factor=class_weight[labels].sum())\n\n        num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos]))\n        num_total_masks = max(num_total_masks, 1)\n\n        # extract positive ones\n        # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)\n        mask_preds = mask_preds[mask_weights > 0]\n\n        if mask_targets.shape[0] == 0:\n            # zero match\n            loss_dice = mask_preds.sum()\n            loss_mask = mask_preds.sum()\n            return loss_cls, loss_mask, loss_dice\n\n        with torch.no_grad():\n            points_coords = get_uncertain_point_coords_with_randomness(\n                mask_preds.unsqueeze(1), None, self.num_points,\n                self.oversample_ratio, self.importance_sample_ratio)\n            # shape (num_total_gts, h, w) -> (num_total_gts, num_points)\n            mask_point_targets = point_sample(\n                mask_targets.unsqueeze(1).float(), points_coords).squeeze(1)\n        # shape (num_queries, h, w) -> (num_queries, num_points)\n        mask_point_preds = point_sample(\n            mask_preds.unsqueeze(1), points_coords).squeeze(1)\n\n        # dice loss\n        loss_dice = self.loss_dice(\n            mask_point_preds, mask_point_targets, avg_factor=num_total_masks)\n\n        # mask loss\n        # shape (num_queries, num_points) -> (num_queries * num_points, )\n        mask_point_preds = mask_point_preds.reshape(-1)\n        # shape (num_total_gts, num_points) -> (num_total_gts * num_points, )\n        mask_point_targets = mask_point_targets.reshape(-1)\n        loss_mask = self.loss_mask(\n            mask_point_preds,\n            mask_point_targets,\n            avg_factor=num_total_masks * self.num_points)\n\n   
     return loss_cls, loss_mask, loss_dice\n\n    def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):\n        \"\"\"Forward for head part which is called after every decoder layer.\n\n        Args:\n            decoder_out (Tensor): in shape (num_queries, batch_size, c).\n            mask_feature (Tensor): in shape (batch_size, c, h, w).\n            attn_mask_target_size (tuple[int, int]): target attention\n                mask size.\n\n        Returns:\n            tuple: A tuple contain three elements.\n\n            - cls_pred (Tensor): Classification scores in shape \\\n                (batch_size, num_queries, cls_out_channels). \\\n                Note `cls_out_channels` should includes background.\n            - mask_pred (Tensor): Mask scores in shape \\\n                (batch_size, num_queries,h, w).\n            - attn_mask (Tensor): Attention mask in shape \\\n                (batch_size * num_heads, num_queries, h, w).\n        \"\"\"\n        decoder_out = self.transformer_decoder.post_norm(decoder_out)\n        decoder_out = decoder_out.transpose(0, 1)\n        # shape (batch_size, num_queries, c)\n        cls_pred = self.cls_embed(decoder_out)\n        # shape (batch_size, num_queries, c)\n        mask_embed = self.mask_embed(decoder_out)\n        # shape (batch_size, num_queries, h, w)\n        mask_pred = torch.einsum('bqc,bchw->bqhw', mask_embed, mask_feature)\n        attn_mask = F.interpolate(\n            mask_pred,\n            attn_mask_target_size,\n            mode='bilinear',\n            align_corners=False)\n        # shape (batch_size, num_queries, h, w) ->\n        #   (batch_size * num_head, num_queries, h*w)\n        attn_mask = attn_mask.flatten(2).unsqueeze(1).repeat(\n            (1, self.num_heads, 1, 1)).flatten(0, 1)\n        attn_mask = attn_mask.sigmoid() < 0.5\n        attn_mask = attn_mask.detach()\n\n        return cls_pred, mask_pred, attn_mask\n\n    def forward(self, feats, img_metas):\n        \"\"\"Forward function.\n\n        Args:\n            feats (list[Tensor]): Multi scale Features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple: A tuple contains two elements.\n\n            - cls_pred_list (list[Tensor)]: Classification logits \\\n                for each decoder layer. Each is a 3D-tensor with shape \\\n                (batch_size, num_queries, cls_out_channels). \\\n                Note `cls_out_channels` should includes background.\n            - mask_pred_list (list[Tensor]): Mask logits for each \\\n                decoder layer. 
Each with shape (batch_size, num_queries, \\\n                 h, w).\n        \"\"\"\n        batch_size = len(img_metas)\n        mask_features, multi_scale_memorys = self.pixel_decoder(feats)\n        # multi_scale_memorys (from low resolution to high resolution)\n        decoder_inputs = []\n        decoder_positional_encodings = []\n        for i in range(self.num_transformer_feat_level):\n            decoder_input = self.decoder_input_projs[i](multi_scale_memorys[i])\n            # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n            decoder_input = decoder_input.flatten(2).permute(2, 0, 1)\n            level_embed = self.level_embed.weight[i].view(1, 1, -1)\n            decoder_input = decoder_input + level_embed\n            # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n            mask = decoder_input.new_zeros(\n                (batch_size, ) + multi_scale_memorys[i].shape[-2:],\n                dtype=torch.bool)\n            decoder_positional_encoding = self.decoder_positional_encoding(\n                mask)\n            decoder_positional_encoding = decoder_positional_encoding.flatten(\n                2).permute(2, 0, 1)\n            decoder_inputs.append(decoder_input)\n            decoder_positional_encodings.append(decoder_positional_encoding)\n        # shape (num_queries, c) -> (num_queries, batch_size, c)\n        query_feat = self.query_feat.weight.unsqueeze(1).repeat(\n            (1, batch_size, 1))\n        query_embed = self.query_embed.weight.unsqueeze(1).repeat(\n            (1, batch_size, 1))\n\n        cls_pred_list = []\n        mask_pred_list = []\n        cls_pred, mask_pred, attn_mask = self.forward_head(\n            query_feat, mask_features, multi_scale_memorys[0].shape[-2:])\n        cls_pred_list.append(cls_pred)\n        mask_pred_list.append(mask_pred)\n\n        for i in range(self.num_transformer_decoder_layers):\n            level_idx = i % self.num_transformer_feat_level\n            # if a mask is all True(all background), then set it all False.\n            attn_mask[torch.where(\n                attn_mask.sum(-1) == attn_mask.shape[-1])] = False\n\n            # cross_attn + self_attn\n            layer = self.transformer_decoder.layers[i]\n            attn_masks = [attn_mask, None]\n            query_feat = layer(\n                query=query_feat,\n                key=decoder_inputs[level_idx],\n                value=decoder_inputs[level_idx],\n                query_pos=query_embed,\n                key_pos=decoder_positional_encodings[level_idx],\n                attn_masks=attn_masks,\n                query_key_padding_mask=None,\n                # here we do not apply masking on padded region\n                key_padding_mask=None)\n            cls_pred, mask_pred, attn_mask = self.forward_head(\n                query_feat, mask_features, multi_scale_memorys[\n                    (i + 1) % self.num_transformer_feat_level].shape[-2:])\n\n            cls_pred_list.append(cls_pred)\n            mask_pred_list.append(mask_pred)\n\n        return cls_pred_list, mask_pred_list\n"
  },
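`Mask2FormerHead` above never supervises full-resolution masks: both the predictions and the ground truth are evaluated at the same set of normalized point coordinates via `mmcv.ops.point_sample`, which is essentially `F.grid_sample` driven by coordinates in [0, 1]. Below is a minimal stand-in for that op (ignoring the extra cases the real op handles) together with the shapes used in `_get_target_single`.

```python
# Minimal sketch of the point-sampling trick Mask2Former uses for its mask and
# dice losses: prediction and ground truth are sampled at shared normalized
# points. This stand-in approximates mmcv.ops.point_sample with F.grid_sample.
import torch
import torch.nn.functional as F


def point_sample(input, points, align_corners=False):
    """input: (N, C, H, W); points: (N, P, 2) with (x, y) in [0, 1]."""
    grid = 2.0 * points.unsqueeze(2) - 1.0          # (N, P, 1, 2) in [-1, 1]
    out = F.grid_sample(input, grid, align_corners=align_corners)
    return out.squeeze(3)                           # (N, C, P)


num_queries, h, w, num_points = 5, 64, 64, 12544
mask_pred = torch.randn(num_queries, h, w)
point_coords = torch.rand(1, num_points, 2)
# shape (num_queries, num_points), matching _get_target_single above
mask_points_pred = point_sample(
    mask_pred.unsqueeze(1), point_coords.repeat(num_queries, 1, 1)).squeeze(1)
print(mask_points_pred.shape)  # torch.Size([5, 12544])
```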
  {
    "path": "mmdet/models/dense_heads/maskformer_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import Conv2d, build_plugin_layer, caffe2_xavier_init\nfrom mmcv.cnn.bricks.transformer import (build_positional_encoding,\n                                         build_transformer_layer_sequence)\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import build_assigner, build_sampler, multi_apply, reduce_mean\nfrom mmdet.models.utils import preprocess_panoptic_gt\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\n\n@HEADS.register_module()\nclass MaskFormerHead(AnchorFreeHead):\n    \"\"\"Implements the MaskFormer head.\n\n    See `Per-Pixel Classification is Not All You Need for Semantic\n    Segmentation <https://arxiv.org/pdf/2107.06278>`_ for details.\n\n    Args:\n        in_channels (list[int]): Number of channels in the input feature map.\n        feat_channels (int): Number of channels for feature.\n        out_channels (int): Number of channels for output.\n        num_things_classes (int): Number of things.\n        num_stuff_classes (int): Number of stuff.\n        num_queries (int): Number of query in Transformer.\n        pixel_decoder (:obj:`mmcv.ConfigDict` | dict): Config for pixel\n            decoder. Defaults to None.\n        enforce_decoder_input_project (bool, optional): Whether to add a layer\n            to change the embed_dim of tranformer encoder in pixel decoder to\n            the embed_dim of transformer decoder. Defaults to False.\n        transformer_decoder (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer decoder. Defaults to None.\n        positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer decoder position encoding. Defaults to None.\n        loss_cls (:obj:`mmcv.ConfigDict` | dict): Config of the classification\n            loss. 
Defaults to `CrossEntropyLoss`.\n        loss_mask (:obj:`mmcv.ConfigDict` | dict): Config of the mask loss.\n            Defaults to `FocalLoss`.\n        loss_dice (:obj:`mmcv.ConfigDict` | dict): Config of the dice loss.\n            Defaults to `DiceLoss`.\n        train_cfg (:obj:`mmcv.ConfigDict` | dict): Training config of\n            Maskformer head.\n        test_cfg (:obj:`mmcv.ConfigDict` | dict): Testing config of Maskformer\n            head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 feat_channels,\n                 out_channels,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 num_queries=100,\n                 pixel_decoder=None,\n                 enforce_decoder_input_project=False,\n                 transformer_decoder=None,\n                 positional_encoding=None,\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0,\n                     class_weight=[1.0] * 133 + [0.1]),\n                 loss_mask=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=20.0),\n                 loss_dice=dict(\n                     type='DiceLoss',\n                     use_sigmoid=True,\n                     activate=True,\n                     naive_dice=True,\n                     loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(AnchorFreeHead, self).__init__(init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = self.num_things_classes + self.num_stuff_classes\n        self.num_queries = num_queries\n\n        pixel_decoder.update(\n            in_channels=in_channels,\n            feat_channels=feat_channels,\n            out_channels=out_channels)\n        self.pixel_decoder = build_plugin_layer(pixel_decoder)[1]\n        self.transformer_decoder = build_transformer_layer_sequence(\n            transformer_decoder)\n        self.decoder_embed_dims = self.transformer_decoder.embed_dims\n        pixel_decoder_type = pixel_decoder.get('type')\n        if pixel_decoder_type == 'PixelDecoder' and (\n                self.decoder_embed_dims != in_channels[-1]\n                or enforce_decoder_input_project):\n            self.decoder_input_proj = Conv2d(\n                in_channels[-1], self.decoder_embed_dims, kernel_size=1)\n        else:\n            self.decoder_input_proj = nn.Identity()\n        self.decoder_pe = build_positional_encoding(positional_encoding)\n        self.query_embed = nn.Embedding(self.num_queries, out_channels)\n\n        self.cls_embed = nn.Linear(feat_channels, self.num_classes + 1)\n        self.mask_embed = nn.Sequential(\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, feat_channels), nn.ReLU(inplace=True),\n            nn.Linear(feat_channels, out_channels))\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n        if train_cfg:\n            self.assigner = build_assigner(train_cfg.get('assigner', None))\n            self.sampler = 
build_sampler(\n                train_cfg.get('sampler', None), context=self)\n\n        self.class_weight = loss_cls.get('class_weight', None)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_mask = build_loss(loss_mask)\n        self.loss_dice = build_loss(loss_dice)\n\n    def init_weights(self):\n        if isinstance(self.decoder_input_proj, Conv2d):\n            caffe2_xavier_init(self.decoder_input_proj, bias=0)\n\n        self.pixel_decoder.init_weights()\n\n        for p in self.transformer_decoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs,\n                      img_metas):\n        \"\"\"Preprocess the ground truth for all images.\n\n        Args:\n            gt_labels_list (list[Tensor]): Each is ground truth\n                labels of each bbox, with shape (num_gts, ).\n            gt_masks_list (list[BitmapMasks]): Each is ground truth\n                masks of each instances of a image, shape\n                (num_gts, h, w).\n            gt_semantic_seg (Tensor | None): Ground truth of semantic\n                segmentation with the shape (batch_size, n, h, w).\n                [0, num_thing_class - 1] means things,\n                [num_thing_class, num_class-1] means stuff,\n                255 means VOID. It's None when training instance segmentation.\n            img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple: a tuple containing the following targets.\n                - labels (list[Tensor]): Ground truth class indices\\\n                    for all images. Each with shape (n, ), n is the sum of\\\n                    number of stuff type and number of instance in a image.\n                - masks (list[Tensor]): Ground truth mask for each\\\n                    image, each with shape (n, h, w).\n        \"\"\"\n        num_things_list = [self.num_things_classes] * len(gt_labels_list)\n        num_stuff_list = [self.num_stuff_classes] * len(gt_labels_list)\n        if gt_semantic_segs is None:\n            gt_semantic_segs = [None] * len(gt_labels_list)\n\n        targets = multi_apply(preprocess_panoptic_gt, gt_labels_list,\n                              gt_masks_list, gt_semantic_segs, num_things_list,\n                              num_stuff_list, img_metas)\n        labels, masks = targets\n        return labels, masks\n\n    def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list,\n                    gt_masks_list, img_metas):\n        \"\"\"Compute classification and mask targets for all images for a decoder\n        layer.\n\n        Args:\n            cls_scores_list (list[Tensor]): Mask score logits from a single\n                decoder layer for all images. Each with shape (num_queries,\n                cls_out_channels).\n            mask_preds_list (list[Tensor]): Mask logits from a single decoder\n                layer for all images. Each with shape (num_queries, h, w).\n            gt_labels_list (list[Tensor]): Ground truth class indices for all\n                images. 
Each with shape (n, ), n is the sum of number of stuff\n                type and number of instance in a image.\n            gt_masks_list (list[Tensor]): Ground truth mask for each image,\n                each with shape (n, h, w).\n            img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple[list[Tensor]]: a tuple containing the following targets.\n                - labels_list (list[Tensor]): Labels of all images.\\\n                    Each with shape (num_queries, ).\n                - label_weights_list (list[Tensor]): Label weights\\\n                    of all images. Each with shape (num_queries, ).\n                - mask_targets_list (list[Tensor]): Mask targets of\\\n                    all images. Each with shape (num_queries, h, w).\n                - mask_weights_list (list[Tensor]): Mask weights of\\\n                    all images. Each with shape (num_queries, ).\n                - num_total_pos (int): Number of positive samples in\\\n                    all images.\n                - num_total_neg (int): Number of negative samples in\\\n                    all images.\n        \"\"\"\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         pos_inds_list,\n         neg_inds_list) = multi_apply(self._get_target_single, cls_scores_list,\n                                      mask_preds_list, gt_labels_list,\n                                      gt_masks_list, img_metas)\n\n        num_total_pos = sum((inds.numel() for inds in pos_inds_list))\n        num_total_neg = sum((inds.numel() for inds in neg_inds_list))\n        return (labels_list, label_weights_list, mask_targets_list,\n                mask_weights_list, num_total_pos, num_total_neg)\n\n    def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,\n                           img_metas):\n        \"\"\"Compute classification and mask targets for one image.\n\n        Args:\n            cls_score (Tensor): Mask score logits from a single decoder layer\n                for one image. Shape (num_queries, cls_out_channels).\n            mask_pred (Tensor): Mask logits for a single decoder layer for one\n                image. Shape (num_queries, h, w).\n            gt_labels (Tensor): Ground truth class indices for one image with\n                shape (n, ). 
n is the sum of number of stuff type and number\n                of instance in a image.\n            gt_masks (Tensor): Ground truth mask for each image, each with\n                shape (n, h, w).\n            img_metas (dict): Image informtation.\n\n        Returns:\n            tuple[Tensor]: a tuple containing the following for one image.\n                - labels (Tensor): Labels of each image.\n                    shape (num_queries, ).\n                - label_weights (Tensor): Label weights of each image.\n                    shape (num_queries, ).\n                - mask_targets (Tensor): Mask targets of each image.\n                    shape (num_queries, h, w).\n                - mask_weights (Tensor): Mask weights of each image.\n                    shape (num_queries, ).\n                - pos_inds (Tensor): Sampled positive indices for each image.\n                - neg_inds (Tensor): Sampled negative indices for each image.\n        \"\"\"\n        target_shape = mask_pred.shape[-2:]\n        if gt_masks.shape[0] > 0:\n            gt_masks_downsampled = F.interpolate(\n                gt_masks.unsqueeze(1).float(), target_shape,\n                mode='nearest').squeeze(1).long()\n        else:\n            gt_masks_downsampled = gt_masks\n\n        # assign and sample\n        assign_result = self.assigner.assign(cls_score, mask_pred, gt_labels,\n                                             gt_masks_downsampled, img_metas)\n        sampling_result = self.sampler.sample(assign_result, mask_pred,\n                                              gt_masks)\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n\n        # label target\n        labels = gt_labels.new_full((self.num_queries, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        labels[pos_inds] = gt_labels[sampling_result.pos_assigned_gt_inds]\n        label_weights = gt_labels.new_ones(self.num_queries)\n\n        # mask target\n        mask_targets = gt_masks[sampling_result.pos_assigned_gt_inds]\n        mask_weights = mask_pred.new_zeros((self.num_queries, ))\n        mask_weights[pos_inds] = 1.0\n\n        return (labels, label_weights, mask_targets, mask_weights, pos_inds,\n                neg_inds)\n\n    @force_fp32(apply_to=('all_cls_scores', 'all_mask_preds'))\n    def loss(self, all_cls_scores, all_mask_preds, gt_labels_list,\n             gt_masks_list, img_metas):\n        \"\"\"Loss function.\n\n        Args:\n            all_cls_scores (Tensor): Classification scores for all decoder\n                layers with shape (num_decoder, batch_size, num_queries,\n                cls_out_channels). Note `cls_out_channels` should includes\n                background.\n            all_mask_preds (Tensor): Mask scores for all decoder layers with\n                shape (num_decoder, batch_size, num_queries, h, w).\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image with shape (n, ). 
n is the sum of number of stuff type\n                and number of instance in a image.\n            gt_masks_list (list[Tensor]): Ground truth mask for each image with\n                shape (n, h, w).\n            img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_dec_layers = len(all_cls_scores)\n        all_gt_labels_list = [gt_labels_list for _ in range(num_dec_layers)]\n        all_gt_masks_list = [gt_masks_list for _ in range(num_dec_layers)]\n        img_metas_list = [img_metas for _ in range(num_dec_layers)]\n        losses_cls, losses_mask, losses_dice = multi_apply(\n            self.loss_single, all_cls_scores, all_mask_preds,\n            all_gt_labels_list, all_gt_masks_list, img_metas_list)\n\n        loss_dict = dict()\n        # loss from the last decoder layer\n        loss_dict['loss_cls'] = losses_cls[-1]\n        loss_dict['loss_mask'] = losses_mask[-1]\n        loss_dict['loss_dice'] = losses_dice[-1]\n        # loss from other decoder layers\n        num_dec_layer = 0\n        for loss_cls_i, loss_mask_i, loss_dice_i in zip(\n                losses_cls[:-1], losses_mask[:-1], losses_dice[:-1]):\n            loss_dict[f'd{num_dec_layer}.loss_cls'] = loss_cls_i\n            loss_dict[f'd{num_dec_layer}.loss_mask'] = loss_mask_i\n            loss_dict[f'd{num_dec_layer}.loss_dice'] = loss_dice_i\n            num_dec_layer += 1\n        return loss_dict\n\n    def loss_single(self, cls_scores, mask_preds, gt_labels_list,\n                    gt_masks_list, img_metas):\n        \"\"\"Loss function for outputs from a single decoder layer.\n\n        Args:\n            cls_scores (Tensor): Mask score logits from a single decoder layer\n                for all images. Shape (batch_size, num_queries,\n                cls_out_channels). Note `cls_out_channels` should includes\n                background.\n            mask_preds (Tensor): Mask logits for a pixel decoder for all\n                images. Shape (batch_size, num_queries, h, w).\n            gt_labels_list (list[Tensor]): Ground truth class indices for each\n                image, each with shape (n, ). 
n is the sum of number of stuff\n                types and number of instances in a image.\n            gt_masks_list (list[Tensor]): Ground truth mask for each image,\n                each with shape (n, h, w).\n            img_metas (list[dict]): List of image meta information.\n\n        Returns:\n            tuple[Tensor]: Loss components for outputs from a single decoder\\\n                layer.\n        \"\"\"\n        num_imgs = cls_scores.size(0)\n        cls_scores_list = [cls_scores[i] for i in range(num_imgs)]\n        mask_preds_list = [mask_preds[i] for i in range(num_imgs)]\n\n        (labels_list, label_weights_list, mask_targets_list, mask_weights_list,\n         num_total_pos,\n         num_total_neg) = self.get_targets(cls_scores_list, mask_preds_list,\n                                           gt_labels_list, gt_masks_list,\n                                           img_metas)\n        # shape (batch_size, num_queries)\n        labels = torch.stack(labels_list, dim=0)\n        # shape (batch_size, num_queries)\n        label_weights = torch.stack(label_weights_list, dim=0)\n        # shape (num_total_gts, h, w)\n        mask_targets = torch.cat(mask_targets_list, dim=0)\n        # shape (batch_size, num_queries)\n        mask_weights = torch.stack(mask_weights_list, dim=0)\n\n        # classfication loss\n        # shape (batch_size * num_queries, )\n        cls_scores = cls_scores.flatten(0, 1)\n        labels = labels.flatten(0, 1)\n        label_weights = label_weights.flatten(0, 1)\n\n        class_weight = cls_scores.new_tensor(self.class_weight)\n        loss_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            label_weights,\n            avg_factor=class_weight[labels].sum())\n\n        num_total_masks = reduce_mean(cls_scores.new_tensor([num_total_pos]))\n        num_total_masks = max(num_total_masks, 1)\n\n        # extract positive ones\n        # shape (batch_size, num_queries, h, w) -> (num_total_gts, h, w)\n        mask_preds = mask_preds[mask_weights > 0]\n        target_shape = mask_targets.shape[-2:]\n\n        if mask_targets.shape[0] == 0:\n            # zero match\n            loss_dice = mask_preds.sum()\n            loss_mask = mask_preds.sum()\n            return loss_cls, loss_mask, loss_dice\n\n        # upsample to shape of target\n        # shape (num_total_gts, h, w)\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(1),\n            target_shape,\n            mode='bilinear',\n            align_corners=False).squeeze(1)\n\n        # dice loss\n        loss_dice = self.loss_dice(\n            mask_preds, mask_targets, avg_factor=num_total_masks)\n\n        # mask loss\n        # FocalLoss support input of shape (n, num_class)\n        h, w = mask_preds.shape[-2:]\n        # shape (num_total_gts, h, w) -> (num_total_gts * h * w, 1)\n        mask_preds = mask_preds.reshape(-1, 1)\n        # shape (num_total_gts, h, w) -> (num_total_gts * h * w)\n        mask_targets = mask_targets.reshape(-1)\n        # target is (1 - mask_targets) !!!\n        loss_mask = self.loss_mask(\n            mask_preds, 1 - mask_targets, avg_factor=num_total_masks * h * w)\n\n        return loss_cls, loss_mask, loss_dice\n\n    def forward(self, feats, img_metas):\n        \"\"\"Forward function.\n\n        Args:\n            feats (list[Tensor]): Features from the upstream network, each\n                is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            
tuple: a tuple contains two elements.\n                - all_cls_scores (Tensor): Classification scores for each\\\n                    scale level. Each is a 4D-tensor with shape\\\n                    (num_decoder, batch_size, num_queries, cls_out_channels).\\\n                    Note `cls_out_channels` should includes background.\n                - all_mask_preds (Tensor): Mask scores for each decoder\\\n                    layer. Each with shape (num_decoder, batch_size,\\\n                    num_queries, h, w).\n        \"\"\"\n        batch_size = len(img_metas)\n        input_img_h, input_img_w = img_metas[0]['batch_input_shape']\n        padding_mask = feats[-1].new_ones(\n            (batch_size, input_img_h, input_img_w), dtype=torch.float32)\n        for i in range(batch_size):\n            img_h, img_w, _ = img_metas[i]['img_shape']\n            padding_mask[i, :img_h, :img_w] = 0\n        padding_mask = F.interpolate(\n            padding_mask.unsqueeze(1),\n            size=feats[-1].shape[-2:],\n            mode='nearest').to(torch.bool).squeeze(1)\n        # when backbone is swin, memory is output of last stage of swin.\n        # when backbone is r50, memory is output of tranformer encoder.\n        mask_features, memory = self.pixel_decoder(feats, img_metas)\n        pos_embed = self.decoder_pe(padding_mask)\n        memory = self.decoder_input_proj(memory)\n        # shape (batch_size, c, h, w) -> (h*w, batch_size, c)\n        memory = memory.flatten(2).permute(2, 0, 1)\n        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n        # shape (batch_size, h * w)\n        padding_mask = padding_mask.flatten(1)\n        # shape = (num_queries, embed_dims)\n        query_embed = self.query_embed.weight\n        # shape = (num_queries, batch_size, embed_dims)\n        query_embed = query_embed.unsqueeze(1).repeat(1, batch_size, 1)\n        target = torch.zeros_like(query_embed)\n        # shape (num_decoder, num_queries, batch_size, embed_dims)\n        out_dec = self.transformer_decoder(\n            query=target,\n            key=memory,\n            value=memory,\n            key_pos=pos_embed,\n            query_pos=query_embed,\n            key_padding_mask=padding_mask)\n        # shape (num_decoder, batch_size, num_queries, embed_dims)\n        out_dec = out_dec.transpose(1, 2)\n\n        # cls_scores\n        all_cls_scores = self.cls_embed(out_dec)\n\n        # mask_preds\n        mask_embed = self.mask_embed(out_dec)\n        all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed,\n                                      mask_features)\n\n        return all_cls_scores, all_mask_preds\n\n    def forward_train(self,\n                      feats,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_masks,\n                      gt_semantic_seg,\n                      gt_bboxes_ignore=None):\n        \"\"\"Forward function for training mode.\n\n        Args:\n            feats (list[Tensor]): Multi-level features from the upstream\n                network, each is a 4D-tensor.\n            img_metas (list[Dict]): List of image information.\n            gt_bboxes (list[Tensor]): Each element is ground truth bboxes of\n                the image, shape (num_gts, 4). 
Not used here.\n            gt_labels (list[Tensor]): Each element is ground truth labels of\n                each box, shape (num_gts,).\n            gt_masks (list[BitmapMasks]): Each element is masks of instances\n                of a image, shape (num_gts, h, w).\n            gt_semantic_seg (list[tensor] | None): Each element is the ground\n                truth of semantic segmentation with the shape (N, H, W).\n                [0, num_thing_class - 1] means things,\n                [num_thing_class, num_class-1] means stuff,\n                255 means VOID. It's None when training instance segmentation.\n            gt_bboxes_ignore (list[Tensor]): Ground truth bboxes to be\n                ignored. Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        # not consider ignoring bboxes\n        assert gt_bboxes_ignore is None\n\n        # forward\n        all_cls_scores, all_mask_preds = self(feats, img_metas)\n\n        # preprocess ground truth\n        gt_labels, gt_masks = self.preprocess_gt(gt_labels, gt_masks,\n                                                 gt_semantic_seg, img_metas)\n\n        # loss\n        losses = self.loss(all_cls_scores, all_mask_preds, gt_labels, gt_masks,\n                           img_metas)\n\n        return losses\n\n    def simple_test(self, feats, img_metas, **kwargs):\n        \"\"\"Test without augmentaton.\n\n        Args:\n            feats (list[Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple: A tuple contains two tensors.\n\n            - mask_cls_results (Tensor): Mask classification logits,\\\n                shape (batch_size, num_queries, cls_out_channels).\n                Note `cls_out_channels` should includes background.\n            - mask_pred_results (Tensor): Mask logits, shape \\\n                (batch_size, num_queries, h, w).\n        \"\"\"\n        all_cls_scores, all_mask_preds = self(feats, img_metas)\n        mask_cls_results = all_cls_scores[-1]\n        mask_pred_results = all_mask_preds[-1]\n\n        # upsample masks\n        img_shape = img_metas[0]['batch_input_shape']\n        mask_pred_results = F.interpolate(\n            mask_pred_results,\n            size=(img_shape[0], img_shape[1]),\n            mode='bilinear',\n            align_corners=False)\n\n        return mask_cls_results, mask_pred_results\n"
  },
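The MaskFormer-style head above produces mask logits by contracting per-query embeddings with the pixel decoder's mask features. A minimal, self-contained sketch of that `torch.einsum('lbqc,bchw->lbqhw', ...)` step (all shapes and tensor names below are illustrative assumptions, not values from the repo):

```python
# Sketch: per-query mask logits from query embeddings and mask features.
import torch

num_dec, batch, queries, channels = 6, 2, 100, 256
h, w = 32, 48

mask_embed = torch.randn(num_dec, batch, queries, channels)   # (l, b, q, c)
mask_features = torch.randn(batch, channels, h, w)            # (b, c, h, w)

# For every decoder layer l, image b and query q, dot the query embedding
# with the feature vector at each pixel -> one logit map per query.
all_mask_preds = torch.einsum('lbqc,bchw->lbqhw', mask_embed, mask_features)
print(all_mask_preds.shape)  # torch.Size([6, 2, 100, 32, 48])
```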
  {
    "path": "mmdet/models/dense_heads/nasfcos_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\n\nfrom mmdet.models.dense_heads.fcos_head import FCOSHead\nfrom ..builder import HEADS\n\n\n@HEADS.register_module()\nclass NASFCOSHead(FCOSHead):\n    \"\"\"Anchor-free head used in `NASFCOS <https://arxiv.org/abs/1906.04423>`_.\n\n    It is quite similar with FCOS head, except for the searched structure of\n    classification branch and bbox regression branch, where a structure of\n    \"dconv3x3, conv3x3, dconv3x3, conv1x1\" is utilized instead.\n    \"\"\"\n\n    def __init__(self, *args, init_cfg=None, **kwargs):\n        if init_cfg is None:\n            init_cfg = [\n                dict(type='Caffe2Xavier', layer=['ConvModule', 'Conv2d']),\n                dict(\n                    type='Normal',\n                    std=0.01,\n                    override=[\n                        dict(name='conv_reg'),\n                        dict(name='conv_centerness'),\n                        dict(\n                            name='conv_cls',\n                            type='Normal',\n                            std=0.01,\n                            bias_prob=0.01)\n                    ]),\n            ]\n        super(NASFCOSHead, self).__init__(*args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        dconv3x3_config = dict(\n            type='DCNv2',\n            kernel_size=3,\n            use_bias=True,\n            deform_groups=2,\n            padding=1)\n        conv3x3_config = dict(type='Conv', kernel_size=3, padding=1)\n        conv1x1_config = dict(type='Conv', kernel_size=1)\n\n        self.arch_config = [\n            dconv3x3_config, conv3x3_config, dconv3x3_config, conv1x1_config\n        ]\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i, op_ in enumerate(self.arch_config):\n            op = copy.deepcopy(op_)\n            chn = self.in_channels if i == 0 else self.feat_channels\n            assert isinstance(op, dict)\n            use_bias = op.pop('use_bias', False)\n            padding = op.pop('padding', 0)\n            kernel_size = op.pop('kernel_size')\n            module = ConvModule(\n                chn,\n                self.feat_channels,\n                kernel_size,\n                stride=1,\n                padding=padding,\n                norm_cfg=self.norm_cfg,\n                bias=use_bias,\n                conv_cfg=op)\n\n            self.cls_convs.append(copy.deepcopy(module))\n            self.reg_convs.append(copy.deepcopy(module))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.conv_centerness = nn.Conv2d(self.feat_channels, 1, 3, padding=1)\n\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n"
  },
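NASFCOSHead expands its searched op list ("dconv3x3, conv3x3, dconv3x3, conv1x1") into a stack of conv modules in `_init_layers`. The sketch below mimics that expansion with plain PyTorch so it runs without mmcv; the DCNv2 ops are stood in for by ordinary `nn.Conv2d`, and all channel sizes are assumptions.

```python
# Sketch: turning a searched op config list into a sequential branch.
import copy
import torch
import torch.nn as nn

arch_config = [
    dict(kernel_size=3, padding=1),  # stand-in for dconv3x3 (DCNv2)
    dict(kernel_size=3, padding=1),  # conv3x3
    dict(kernel_size=3, padding=1),  # stand-in for dconv3x3 (DCNv2)
    dict(kernel_size=1, padding=0),  # conv1x1
]

in_channels, feat_channels = 256, 256
layers = []
for i, op in enumerate(copy.deepcopy(arch_config)):
    chn = in_channels if i == 0 else feat_channels
    layers += [
        nn.Conv2d(chn, feat_channels, op['kernel_size'], padding=op['padding']),
        nn.ReLU(inplace=True),
    ]
branch = nn.Sequential(*layers)

x = torch.randn(1, in_channels, 40, 40)
print(branch(x).shape)  # torch.Size([1, 256, 40, 40])
```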
  {
    "path": "mmdet/models/dense_heads/paa_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import multi_apply, multiclass_nms\nfrom mmdet.core.bbox.iou_calculators import bbox_overlaps\nfrom mmdet.models import HEADS\nfrom mmdet.models.dense_heads import ATSSHead\n\nEPS = 1e-12\ntry:\n    import sklearn.mixture as skm\nexcept ImportError:\n    skm = None\n\n\ndef levels_to_images(mlvl_tensor):\n    \"\"\"Concat multi-level feature maps by image.\n\n    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]\n    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to\n    (N, H*W , C), then split the element to N elements with shape (H*W, C), and\n    concat elements in same image of all level along first dimension.\n\n    Args:\n        mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from\n            corresponding level. Each element is of shape (N, C, H, W)\n\n    Returns:\n        list[torch.Tensor]: A list that contains N tensors and each tensor is\n            of shape (num_elements, C)\n    \"\"\"\n    batch_size = mlvl_tensor[0].size(0)\n    batch_list = [[] for _ in range(batch_size)]\n    channels = mlvl_tensor[0].size(1)\n    for t in mlvl_tensor:\n        t = t.permute(0, 2, 3, 1)\n        t = t.view(batch_size, -1, channels).contiguous()\n        for img in range(batch_size):\n            batch_list[img].append(t[img])\n    return [torch.cat(item, 0) for item in batch_list]\n\n\n@HEADS.register_module()\nclass PAAHead(ATSSHead):\n    \"\"\"Head of PAAAssignment: Probabilistic Anchor Assignment with IoU\n    Prediction for Object Detection.\n\n    Code is modified from the `official github repo\n    <https://github.com/kkhoot/PAA/blob/master/paa_core\n    /modeling/rpn/paa/loss.py>`_.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/2007.08103>`_ .\n\n    Args:\n        topk (int): Select topk samples with smallest loss in\n            each level.\n        score_voting (bool): Whether to use score voting in post-process.\n        covariance_type : String describing the type of covariance parameters\n            to be used in :class:`sklearn.mixture.GaussianMixture`.\n            It must be one of:\n\n            - 'full': each component has its own general covariance matrix\n            - 'tied': all components share the same general covariance matrix\n            - 'diag': each component has its own diagonal covariance matrix\n            - 'spherical': each component has its own single variance\n            Default: 'diag'. From 'full' to 'spherical', the gmm fitting\n            process is faster yet the performance could be influenced. 
For most\n            cases, 'diag' should be a good choice.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 topk=9,\n                 score_voting=True,\n                 covariance_type='diag',\n                 **kwargs):\n        # topk used in paa reassign process\n        self.topk = topk\n        self.with_score_voting = score_voting\n        self.covariance_type = covariance_type\n        super(PAAHead, self).__init__(*args, **kwargs)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'iou_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             iou_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            iou_preds (list[Tensor]): iou_preds for each scale\n                level with shape (N, num_anchors * 1, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): Specify which bounding\n                boxes can be ignored when are computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss gmm_assignment.\n        \"\"\"\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n        )\n        (labels, labels_weight, bboxes_target, bboxes_weight, pos_inds,\n         pos_gt_index) = cls_reg_targets\n        cls_scores = levels_to_images(cls_scores)\n        cls_scores = [\n            item.reshape(-1, self.cls_out_channels) for item in cls_scores\n        ]\n        bbox_preds = levels_to_images(bbox_preds)\n        bbox_preds = [item.reshape(-1, 4) for item in bbox_preds]\n        iou_preds = levels_to_images(iou_preds)\n        iou_preds = [item.reshape(-1, 1) for item in iou_preds]\n        pos_losses_list, = multi_apply(self.get_pos_loss, anchor_list,\n                                       cls_scores, bbox_preds, labels,\n                                       labels_weight, bboxes_target,\n                                       bboxes_weight, pos_inds)\n\n        with torch.no_grad():\n            reassign_labels, reassign_label_weight, \\\n                reassign_bbox_weights, num_pos = multi_apply(\n                    self.paa_reassign,\n                    pos_losses_list,\n                    labels,\n           
         labels_weight,\n                    bboxes_weight,\n                    pos_inds,\n                    pos_gt_index,\n                    anchor_list)\n            num_pos = sum(num_pos)\n        # convert all tensor list to a flatten tensor\n        cls_scores = torch.cat(cls_scores, 0).view(-1, cls_scores[0].size(-1))\n        bbox_preds = torch.cat(bbox_preds, 0).view(-1, bbox_preds[0].size(-1))\n        iou_preds = torch.cat(iou_preds, 0).view(-1, iou_preds[0].size(-1))\n        labels = torch.cat(reassign_labels, 0).view(-1)\n        flatten_anchors = torch.cat(\n            [torch.cat(item, 0) for item in anchor_list])\n        labels_weight = torch.cat(reassign_label_weight, 0).view(-1)\n        bboxes_target = torch.cat(bboxes_target,\n                                  0).view(-1, bboxes_target[0].size(-1))\n\n        pos_inds_flatten = ((labels >= 0)\n                            &\n                            (labels < self.num_classes)).nonzero().reshape(-1)\n\n        losses_cls = self.loss_cls(\n            cls_scores,\n            labels,\n            labels_weight,\n            avg_factor=max(num_pos, len(img_metas)))  # avoid num_pos=0\n        if num_pos:\n            pos_bbox_pred = self.bbox_coder.decode(\n                flatten_anchors[pos_inds_flatten],\n                bbox_preds[pos_inds_flatten])\n            pos_bbox_target = bboxes_target[pos_inds_flatten]\n            iou_target = bbox_overlaps(\n                pos_bbox_pred.detach(), pos_bbox_target, is_aligned=True)\n            losses_iou = self.loss_centerness(\n                iou_preds[pos_inds_flatten],\n                iou_target.unsqueeze(-1),\n                avg_factor=num_pos)\n            losses_bbox = self.loss_bbox(\n                pos_bbox_pred,\n                pos_bbox_target,\n                iou_target.clamp(min=EPS),\n                avg_factor=iou_target.sum())\n        else:\n            losses_iou = iou_preds.sum() * 0\n            losses_bbox = bbox_preds.sum() * 0\n\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox, loss_iou=losses_iou)\n\n    def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_weight,\n                     bbox_target, bbox_weight, pos_inds):\n        \"\"\"Calculate loss of all potential positive samples obtained from first\n        match process.\n\n        Args:\n            anchors (list[Tensor]): Anchors of each scale.\n            cls_score (Tensor): Box scores of single image with shape\n                (num_anchors, num_classes)\n            bbox_pred (Tensor): Box energies / deltas of single image\n                with shape (num_anchors, 4)\n            label (Tensor): classification target of each anchor with\n                shape (num_anchors,)\n            label_weight (Tensor): Classification loss weight of each\n                anchor with shape (num_anchors).\n            bbox_target (dict): Regression target of each anchor with\n                shape (num_anchors, 4).\n            bbox_weight (Tensor): Bbox weight of each anchor with shape\n                (num_anchors, 4).\n            pos_inds (Tensor): Index of all positive samples got from\n                first assign process.\n\n        Returns:\n            Tensor: Losses of all positive samples in single image.\n        \"\"\"\n        if not len(pos_inds):\n            return cls_score.new([]),\n        anchors_all_level = torch.cat(anchors, 0)\n        pos_scores = cls_score[pos_inds]\n        pos_bbox_pred = bbox_pred[pos_inds]\n        
pos_label = label[pos_inds]\n        pos_label_weight = label_weight[pos_inds]\n        pos_bbox_target = bbox_target[pos_inds]\n        pos_bbox_weight = bbox_weight[pos_inds]\n        pos_anchors = anchors_all_level[pos_inds]\n        pos_bbox_pred = self.bbox_coder.decode(pos_anchors, pos_bbox_pred)\n\n        # to keep loss dimension\n        loss_cls = self.loss_cls(\n            pos_scores,\n            pos_label,\n            pos_label_weight,\n            avg_factor=1.0,\n            reduction_override='none')\n\n        loss_bbox = self.loss_bbox(\n            pos_bbox_pred,\n            pos_bbox_target,\n            pos_bbox_weight,\n            avg_factor=1.0,  # keep same loss weight before reassign\n            reduction_override='none')\n\n        loss_cls = loss_cls.sum(-1)\n        pos_loss = loss_bbox + loss_cls\n        return pos_loss,\n\n    def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,\n                     pos_inds, pos_gt_inds, anchors):\n        \"\"\"Fit loss to GMM distribution and separate positive, ignore, negative\n        samples again with GMM model.\n\n        Args:\n            pos_losses (Tensor): Losses of all positive samples in\n                single image.\n            label (Tensor): classification target of each anchor with\n                shape (num_anchors,)\n            label_weight (Tensor): Classification loss weight of each\n                anchor with shape (num_anchors).\n            bbox_weight (Tensor): Bbox weight of each anchor with shape\n                (num_anchors, 4).\n            pos_inds (Tensor): Index of all positive samples got from\n                first assign process.\n            pos_gt_inds (Tensor): Gt_index of all positive samples got\n                from first assign process.\n            anchors (list[Tensor]): Anchors of each scale.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - label (Tensor): classification target of each anchor after\n                  paa assign, with shape (num_anchors,)\n                - label_weight (Tensor): Classification loss weight of each\n                  anchor after paa assign, with shape (num_anchors).\n                - bbox_weight (Tensor): Bbox weight of each anchor with shape\n                  (num_anchors, 4).\n                - num_pos (int): The number of positive samples after paa\n                  assign.\n        \"\"\"\n        if not len(pos_inds):\n            return label, label_weight, bbox_weight, 0\n        label = label.clone()\n        label_weight = label_weight.clone()\n        bbox_weight = bbox_weight.clone()\n        num_gt = pos_gt_inds.max() + 1\n        num_level = len(anchors)\n        num_anchors_each_level = [item.size(0) for item in anchors]\n        num_anchors_each_level.insert(0, 0)\n        inds_level_interval = np.cumsum(num_anchors_each_level)\n        pos_level_mask = []\n        for i in range(num_level):\n            mask = (pos_inds >= inds_level_interval[i]) & (\n                pos_inds < inds_level_interval[i + 1])\n            pos_level_mask.append(mask)\n        pos_inds_after_paa = [label.new_tensor([])]\n        ignore_inds_after_paa = [label.new_tensor([])]\n        for gt_ind in range(num_gt):\n            pos_inds_gmm = []\n            pos_loss_gmm = []\n            gt_mask = pos_gt_inds == gt_ind\n            for level in range(num_level):\n                level_mask = pos_level_mask[level]\n                level_gt_mask = level_mask & 
gt_mask\n                value, topk_inds = pos_losses[level_gt_mask].topk(\n                    min(level_gt_mask.sum(), self.topk), largest=False)\n                pos_inds_gmm.append(pos_inds[level_gt_mask][topk_inds])\n                pos_loss_gmm.append(value)\n            pos_inds_gmm = torch.cat(pos_inds_gmm)\n            pos_loss_gmm = torch.cat(pos_loss_gmm)\n            # fix gmm need at least two sample\n            if len(pos_inds_gmm) < 2:\n                continue\n            device = pos_inds_gmm.device\n            pos_loss_gmm, sort_inds = pos_loss_gmm.sort()\n            pos_inds_gmm = pos_inds_gmm[sort_inds]\n            pos_loss_gmm = pos_loss_gmm.view(-1, 1).cpu().numpy()\n            min_loss, max_loss = pos_loss_gmm.min(), pos_loss_gmm.max()\n            means_init = np.array([min_loss, max_loss]).reshape(2, 1)\n            weights_init = np.array([0.5, 0.5])\n            precisions_init = np.array([1.0, 1.0]).reshape(2, 1, 1)  # full\n            if self.covariance_type == 'spherical':\n                precisions_init = precisions_init.reshape(2)\n            elif self.covariance_type == 'diag':\n                precisions_init = precisions_init.reshape(2, 1)\n            elif self.covariance_type == 'tied':\n                precisions_init = np.array([[1.0]])\n            if skm is None:\n                raise ImportError('Please run \"pip install sklearn\" '\n                                  'to install sklearn first.')\n            gmm = skm.GaussianMixture(\n                2,\n                weights_init=weights_init,\n                means_init=means_init,\n                precisions_init=precisions_init,\n                covariance_type=self.covariance_type)\n            gmm.fit(pos_loss_gmm)\n            gmm_assignment = gmm.predict(pos_loss_gmm)\n            scores = gmm.score_samples(pos_loss_gmm)\n            gmm_assignment = torch.from_numpy(gmm_assignment).to(device)\n            scores = torch.from_numpy(scores).to(device)\n\n            pos_inds_temp, ignore_inds_temp = self.gmm_separation_scheme(\n                gmm_assignment, scores, pos_inds_gmm)\n            pos_inds_after_paa.append(pos_inds_temp)\n            ignore_inds_after_paa.append(ignore_inds_temp)\n\n        pos_inds_after_paa = torch.cat(pos_inds_after_paa)\n        ignore_inds_after_paa = torch.cat(ignore_inds_after_paa)\n        reassign_mask = (pos_inds.unsqueeze(1) != pos_inds_after_paa).all(1)\n        reassign_ids = pos_inds[reassign_mask]\n        label[reassign_ids] = self.num_classes\n        label_weight[ignore_inds_after_paa] = 0\n        bbox_weight[reassign_ids] = 0\n        num_pos = len(pos_inds_after_paa)\n        return label, label_weight, bbox_weight, num_pos\n\n    def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):\n        \"\"\"A general separation scheme for gmm model.\n\n        It separates a GMM distribution of candidate samples into three\n        parts, 0 1 and uncertain areas, and you can implement other\n        separation schemes by rewriting this function.\n\n        Args:\n            gmm_assignment (Tensor): The prediction of GMM which is of shape\n                (num_samples,). The 0/1 value indicates the distribution\n                that each sample comes from.\n            scores (Tensor): The probability of sample coming from the\n                fit GMM distribution. The tensor is of shape (num_samples,).\n            pos_inds_gmm (Tensor): All the indexes of samples which are used\n                to fit GMM model. 
The tensor is of shape (num_samples,)\n\n        Returns:\n            tuple[Tensor]: The indices of positive and ignored samples.\n\n                - pos_inds_temp (Tensor): Indices of positive samples.\n                - ignore_inds_temp (Tensor): Indices of ignore samples.\n        \"\"\"\n        # The implementation is (c) in Fig.3 in origin paper instead of (b).\n        # You can refer to issues such as\n        # https://github.com/kkhoot/PAA/issues/8 and\n        # https://github.com/kkhoot/PAA/issues/9.\n        fgs = gmm_assignment == 0\n        pos_inds_temp = fgs.new_tensor([], dtype=torch.long)\n        ignore_inds_temp = fgs.new_tensor([], dtype=torch.long)\n        if fgs.nonzero().numel():\n            _, pos_thr_ind = scores[fgs].topk(1)\n            pos_inds_temp = pos_inds_gmm[fgs][:pos_thr_ind + 1]\n            ignore_inds_temp = pos_inds_gmm.new_tensor([])\n        return pos_inds_temp, ignore_inds_temp\n\n    def get_targets(\n        self,\n        anchor_list,\n        valid_flag_list,\n        gt_bboxes_list,\n        img_metas,\n        gt_bboxes_ignore_list=None,\n        gt_labels_list=None,\n        label_channels=1,\n        unmap_outputs=True,\n    ):\n        \"\"\"Get targets for PAA head.\n\n        This method is almost the same as `AnchorHead.get_targets()`. We direct\n        return the results from _get_targets_single instead map it to levels\n        by images_to_levels function.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - labels (list[Tensor]): Labels of all anchors, each with\n                    shape (num_anchors,).\n                - label_weights (list[Tensor]): Label weights of all anchor.\n                    each with shape (num_anchors,).\n                - bbox_targets (list[Tensor]): BBox targets of all anchors.\n                    each with shape (num_anchors, 4).\n                - bbox_weights (list[Tensor]): BBox weights of all anchors.\n                    each with shape (num_anchors, 4).\n                - pos_inds (list[Tensor]): Contains all index of positive\n                    sample in all anchor.\n                - gt_inds (list[Tensor]): Contains all gt_index of positive\n                    sample in all anchor.\n        \"\"\"\n\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n        concat_anchor_list = []\n        concat_valid_flag_list = []\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n            concat_valid_flag_list.append(torch.cat(valid_flag_list[i]))\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        results = multi_apply(\n            self._get_targets_single,\n            concat_anchor_list,\n            concat_valid_flag_list,\n            gt_bboxes_list,\n            gt_bboxes_ignore_list,\n            gt_labels_list,\n            img_metas,\n            label_channels=label_channels,\n            unmap_outputs=unmap_outputs)\n\n        (labels, label_weights, bbox_targets, bbox_weights, valid_pos_inds,\n         valid_neg_inds, sampling_result) = results\n\n        # Due to valid flag of anchors, we have to calculate the real pos_inds\n        # in origin anchor set.\n        pos_inds = []\n        for i, single_labels in enumerate(labels):\n            pos_mask = (0 <= single_labels) & (\n                single_labels < self.num_classes)\n            pos_inds.append(pos_mask.nonzero().view(-1))\n\n        gt_inds = [item.pos_assigned_gt_inds for item in sampling_result]\n        return (labels, label_weights, bbox_targets, bbox_weights, pos_inds,\n                gt_inds)\n\n    def _get_targets_single(self,\n                            flat_anchors,\n                            valid_flags,\n                            gt_bboxes,\n                            gt_bboxes_ignore,\n                            gt_labels,\n                            img_meta,\n                            label_channels=1,\n                            unmap_outputs=True):\n        \"\"\"Compute regression and classification 
targets for anchors in a\n        single image.\n\n        This method is same as `AnchorHead._get_targets_single()`.\n        \"\"\"\n        assert unmap_outputs, 'We must map outputs back to the original' \\\n                              'set of anchors in PAAhead'\n        return super(ATSSHead, self)._get_targets_single(\n            flat_anchors,\n            valid_flags,\n            gt_bboxes,\n            gt_bboxes_ignore,\n            gt_labels,\n            img_meta,\n            label_channels=1,\n            unmap_outputs=True)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def get_bboxes(self,\n                   cls_scores,\n                   bbox_preds,\n                   score_factors=None,\n                   img_metas=None,\n                   cfg=None,\n                   rescale=False,\n                   with_nms=True,\n                   **kwargs):\n        assert with_nms, 'PAA only supports \"with_nms=True\" now and it ' \\\n                         'means PAAHead does not support ' \\\n                         'test-time augmentation'\n        return super(ATSSHead, self).get_bboxes(cls_scores, bbox_preds,\n                                                score_factors, img_metas, cfg,\n                                                rescale, with_nms, **kwargs)\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factors from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. 
If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_score_factors = []\n        for level_idx, (cls_score, bbox_pred, score_factor, priors) in \\\n                enumerate(zip(cls_score_list, bbox_pred_list,\n                              score_factor_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            scores = cls_score.permute(1, 2, 0).reshape(\n                -1, self.cls_out_channels).sigmoid()\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            score_factor = score_factor.permute(1, 2, 0).reshape(-1).sigmoid()\n\n            if 0 < nms_pre < scores.shape[0]:\n                max_scores, _ = (scores *\n                                 score_factor[:, None]).sqrt().max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                priors = priors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n                score_factor = score_factor[topk_inds]\n\n            bboxes = self.bbox_coder.decode(\n                priors, bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_score_factors.append(score_factor)\n\n        return self._bbox_post_process(mlvl_scores, mlvl_bboxes,\n                                       img_meta['scale_factor'], cfg, rescale,\n                                       with_nms, mlvl_score_factors, **kwargs)\n\n    def _bbox_post_process(self,\n                           mlvl_scores,\n                           mlvl_bboxes,\n                           scale_factor,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           mlvl_score_factors=None,\n                           **kwargs):\n        \"\"\"bbox post-processing method.\n\n        The boxes would be rescaled to the original image scale and do\n        the nms operation. 
Usually with_nms is False is used for aug test.\n\n        Args:\n            mlvl_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_bboxes, num_class).\n            mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale\n                levels of a single image, each item has shape (num_bboxes, 4).\n            scale_factor (ndarray, optional): Scale factor of the image arange\n                as (w_scale, h_scale, w_scale, h_scale).\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n            mlvl_score_factors (list[Tensor], optional): Score factor from\n                all scale levels of a single image, each item has shape\n                (num_bboxes, ). Default: None.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        mlvl_bboxes = torch.cat(mlvl_bboxes)\n        if rescale:\n            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n        mlvl_scores = torch.cat(mlvl_scores)\n        # Add a dummy background class to the backend when using sigmoid\n        # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n        # BG cat_id: num_class\n        padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n        mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n\n        mlvl_iou_preds = torch.cat(mlvl_score_factors)\n        mlvl_nms_scores = (mlvl_scores * mlvl_iou_preds[:, None]).sqrt()\n        det_bboxes, det_labels = multiclass_nms(\n            mlvl_bboxes,\n            mlvl_nms_scores,\n            cfg.score_thr,\n            cfg.nms,\n            cfg.max_per_img,\n            score_factors=None)\n        if self.with_score_voting and len(det_bboxes) > 0:\n            det_bboxes, det_labels = self.score_voting(det_bboxes, det_labels,\n                                                       mlvl_bboxes,\n                                                       mlvl_nms_scores,\n                                                       cfg.score_thr)\n\n        return det_bboxes, det_labels\n\n    def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,\n                     mlvl_nms_scores, score_thr):\n        \"\"\"Implementation of score voting method works on each remaining boxes\n        after NMS procedure.\n\n        Args:\n            det_bboxes (Tensor): Remaining boxes after NMS procedure,\n                with shape (k, 5), each dimension means\n                (x1, y1, x2, y2, score).\n            
det_labels (Tensor): The label of remaining boxes, with shape\n                (k, 1),Labels are 0-based.\n            mlvl_bboxes (Tensor): All boxes before the NMS procedure,\n                with shape (num_anchors,4).\n            mlvl_nms_scores (Tensor): The scores of all boxes which is used\n                in the NMS procedure, with shape (num_anchors, num_class)\n            score_thr (float): The score threshold of bboxes.\n\n        Returns:\n            tuple: Usually returns a tuple containing voting results.\n\n                - det_bboxes_voted (Tensor): Remaining boxes after\n                    score voting procedure, with shape (k, 5), each\n                    dimension means (x1, y1, x2, y2, score).\n                - det_labels_voted (Tensor): Label of remaining bboxes\n                    after voting, with shape (num_anchors,).\n        \"\"\"\n        candidate_mask = mlvl_nms_scores > score_thr\n        candidate_mask_nonzeros = candidate_mask.nonzero(as_tuple=False)\n        candidate_inds = candidate_mask_nonzeros[:, 0]\n        candidate_labels = candidate_mask_nonzeros[:, 1]\n        candidate_bboxes = mlvl_bboxes[candidate_inds]\n        candidate_scores = mlvl_nms_scores[candidate_mask]\n        det_bboxes_voted = []\n        det_labels_voted = []\n        for cls in range(self.cls_out_channels):\n            candidate_cls_mask = candidate_labels == cls\n            if not candidate_cls_mask.any():\n                continue\n            candidate_cls_scores = candidate_scores[candidate_cls_mask]\n            candidate_cls_bboxes = candidate_bboxes[candidate_cls_mask]\n            det_cls_mask = det_labels == cls\n            det_cls_bboxes = det_bboxes[det_cls_mask].view(\n                -1, det_bboxes.size(-1))\n            det_candidate_ious = bbox_overlaps(det_cls_bboxes[:, :4],\n                                               candidate_cls_bboxes)\n            for det_ind in range(len(det_cls_bboxes)):\n                single_det_ious = det_candidate_ious[det_ind]\n                pos_ious_mask = single_det_ious > 0.01\n                pos_ious = single_det_ious[pos_ious_mask]\n                pos_bboxes = candidate_cls_bboxes[pos_ious_mask]\n                pos_scores = candidate_cls_scores[pos_ious_mask]\n                pis = (torch.exp(-(1 - pos_ious)**2 / 0.025) *\n                       pos_scores)[:, None]\n                voted_box = torch.sum(\n                    pis * pos_bboxes, dim=0) / torch.sum(\n                        pis, dim=0)\n                voted_score = det_cls_bboxes[det_ind][-1:][None, :]\n                det_bboxes_voted.append(\n                    torch.cat((voted_box[None, :], voted_score), dim=1))\n                det_labels_voted.append(cls)\n\n        det_bboxes_voted = torch.cat(det_bboxes_voted, dim=0)\n        det_labels_voted = det_labels.new_tensor(det_labels_voted)\n        return det_bboxes_voted, det_labels_voted\n"
  },
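The core of `PAAHead.paa_reassign` is fitting a two-component Gaussian mixture to the losses of the candidate positives for one ground-truth box and keeping the low-loss component as positives. A hedged sketch of that step with synthetic loss values (the exact separation scheme in the head is `gmm_separation_scheme`, which this does not reproduce):

```python
# Sketch: split candidate anchors into low-loss / high-loss groups with a GMM.
import numpy as np
import sklearn.mixture as skm

pos_losses = np.sort(np.array([0.2, 0.25, 0.3, 1.4, 1.6, 1.9]))  # synthetic
pos_losses = pos_losses.reshape(-1, 1)

means_init = np.array([pos_losses.min(), pos_losses.max()]).reshape(2, 1)
gmm = skm.GaussianMixture(
    2,
    weights_init=np.array([0.5, 0.5]),
    means_init=means_init,
    precisions_init=np.array([1.0, 1.0]).reshape(2, 1),  # 'diag' covariance
    covariance_type='diag')
gmm.fit(pos_losses)

assignment = gmm.predict(pos_losses)   # component 0 initialised at min loss
print(assignment)                      # e.g. [0 0 0 1 1 1]
```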
  {
    "path": "mmdet/models/dense_heads/pisa_retinanet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import images_to_levels\nfrom ..builder import HEADS\nfrom ..losses import carl_loss, isr_p\nfrom .retina_head import RetinaHead\n\n\n@HEADS.register_module()\nclass PISARetinaHead(RetinaHead):\n    \"\"\"PISA Retinanet Head.\n\n    The head owns the same structure with Retinanet Head, but differs in two\n        aspects:\n        1. Importance-based Sample Reweighting Positive (ISR-P) is applied to\n            change the positive loss weights.\n        2. Classification-aware regression loss is adopted as a third loss.\n    \"\"\"\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image\n                with shape (num_obj, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each image\n                with shape (num_obj, 4).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.\n                Default: None.\n\n        Returns:\n            dict: Loss dict, comprise classification loss, regression loss and\n                carl loss.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n            return_sampling_results=True)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets\n        num_total_samples = (\n            num_total_pos + num_total_neg if self.sampling else num_total_pos)\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        # concat all level anchors and flags to a single tensor\n        concat_anchor_list = []\n        for i in range(len(anchor_list)):\n            concat_anchor_list.append(torch.cat(anchor_list[i]))\n        all_anchor_list = images_to_levels(concat_anchor_list,\n                                           num_level_anchors)\n\n        num_imgs = len(img_metas)\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1, label_channels)\n            for cls_score in cls_scores\n        
]\n        flatten_cls_scores = torch.cat(\n            flatten_cls_scores, dim=1).reshape(-1,\n                                               flatten_cls_scores[0].size(-1))\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_bbox_preds = torch.cat(\n            flatten_bbox_preds, dim=1).view(-1, flatten_bbox_preds[0].size(-1))\n        flatten_labels = torch.cat(labels_list, dim=1).reshape(-1)\n        flatten_label_weights = torch.cat(\n            label_weights_list, dim=1).reshape(-1)\n        flatten_anchors = torch.cat(all_anchor_list, dim=1).reshape(-1, 4)\n        flatten_bbox_targets = torch.cat(\n            bbox_targets_list, dim=1).reshape(-1, 4)\n        flatten_bbox_weights = torch.cat(\n            bbox_weights_list, dim=1).reshape(-1, 4)\n\n        # Apply ISR-P\n        isr_cfg = self.train_cfg.get('isr', None)\n        if isr_cfg is not None:\n            all_targets = (flatten_labels, flatten_label_weights,\n                           flatten_bbox_targets, flatten_bbox_weights)\n            with torch.no_grad():\n                all_targets = isr_p(\n                    flatten_cls_scores,\n                    flatten_bbox_preds,\n                    all_targets,\n                    flatten_anchors,\n                    sampling_results_list,\n                    bbox_coder=self.bbox_coder,\n                    loss_cls=self.loss_cls,\n                    num_class=self.num_classes,\n                    **self.train_cfg.isr)\n            (flatten_labels, flatten_label_weights, flatten_bbox_targets,\n             flatten_bbox_weights) = all_targets\n\n        # For convenience we compute loss once instead separating by fpn level,\n        # so that we don't need to separate the weights by level again.\n        # The result should be the same\n        losses_cls = self.loss_cls(\n            flatten_cls_scores,\n            flatten_labels,\n            flatten_label_weights,\n            avg_factor=num_total_samples)\n        losses_bbox = self.loss_bbox(\n            flatten_bbox_preds,\n            flatten_bbox_targets,\n            flatten_bbox_weights,\n            avg_factor=num_total_samples)\n        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n        # CARL Loss\n        carl_cfg = self.train_cfg.get('carl', None)\n        if carl_cfg is not None:\n            loss_carl = carl_loss(\n                flatten_cls_scores,\n                flatten_labels,\n                flatten_bbox_preds,\n                flatten_bbox_targets,\n                self.loss_bbox,\n                **self.train_cfg.carl,\n                avg_factor=num_total_pos,\n                sigmoid=True,\n                num_class=self.num_classes)\n            loss_dict.update(loss_carl)\n\n        return loss_dict\n"
  },
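Before applying ISR-P, `PISARetinaHead.loss` flattens the per-level score maps of shape (N, A*C, H, W) into a single (N*sum(H_i*W_i*A), C) tensor so all anchors can be reweighted in one pass. A small sketch of that flattening with assumed sizes:

```python
# Sketch: concatenating multi-level class scores into one flat tensor.
import torch

num_imgs, num_anchors, num_classes = 2, 9, 80
level_sizes = [(40, 40), (20, 20), (10, 10)]
cls_scores = [torch.randn(num_imgs, num_anchors * num_classes, h, w)
              for h, w in level_sizes]

flatten = [
    s.permute(0, 2, 3, 1).reshape(num_imgs, -1, num_classes)
    for s in cls_scores
]
flatten_cls_scores = torch.cat(flatten, dim=1).reshape(-1, num_classes)
print(flatten_cls_scores.shape)  # torch.Size([37800, 80]) = 2*9*(1600+400+100) x 80
```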
  {
    "path": "mmdet/models/dense_heads/pisa_ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import multi_apply\nfrom ..builder import HEADS\nfrom ..losses import CrossEntropyLoss, SmoothL1Loss, carl_loss, isr_p\nfrom .ssd_head import SSDHead\n\n\n# TODO: add loss evaluator for SSD\n@HEADS.register_module()\nclass PISASSDHead(SSDHead):\n\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image\n                with shape (num_obj, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each image\n                with shape (num_obj, 4).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor]): Ignored gt bboxes of each image.\n                Default: None.\n\n        Returns:\n            dict: Loss dict, comprise classification loss regression loss and\n                carl loss.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=1,\n            unmap_outputs=False,\n            return_sampling_results=True)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg, sampling_results_list) = cls_reg_targets\n\n        num_images = len(img_metas)\n        all_cls_scores = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(\n                num_images, -1, self.cls_out_channels) for s in cls_scores\n        ], 1)\n        all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n        all_label_weights = torch.cat(label_weights_list,\n                                      -1).view(num_images, -1)\n        all_bbox_preds = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n            for b in bbox_preds\n        ], -2)\n        all_bbox_targets = torch.cat(bbox_targets_list,\n                                     -2).view(num_images, -1, 4)\n        all_bbox_weights = torch.cat(bbox_weights_list,\n                                     -2).view(num_images, -1, 4)\n\n        # concat all level anchors to a single tensor\n        all_anchors = []\n        for i in range(num_images):\n            all_anchors.append(torch.cat(anchor_list[i]))\n\n        isr_cfg = self.train_cfg.get('isr', None)\n        all_targets = (all_labels.view(-1), all_label_weights.view(-1),\n                       all_bbox_targets.view(-1,\n                                             4), 
all_bbox_weights.view(-1, 4))\n        # apply ISR-P\n        if isr_cfg is not None:\n            all_targets = isr_p(\n                all_cls_scores.view(-1, all_cls_scores.size(-1)),\n                all_bbox_preds.view(-1, 4),\n                all_targets,\n                torch.cat(all_anchors),\n                sampling_results_list,\n                loss_cls=CrossEntropyLoss(),\n                bbox_coder=self.bbox_coder,\n                **self.train_cfg.isr,\n                num_class=self.num_classes)\n            (new_labels, new_label_weights, new_bbox_targets,\n             new_bbox_weights) = all_targets\n            all_labels = new_labels.view(all_labels.shape)\n            all_label_weights = new_label_weights.view(all_label_weights.shape)\n            all_bbox_targets = new_bbox_targets.view(all_bbox_targets.shape)\n            all_bbox_weights = new_bbox_weights.view(all_bbox_weights.shape)\n\n        # add CARL loss\n        carl_loss_cfg = self.train_cfg.get('carl', None)\n        if carl_loss_cfg is not None:\n            loss_carl = carl_loss(\n                all_cls_scores.view(-1, all_cls_scores.size(-1)),\n                all_targets[0],\n                all_bbox_preds.view(-1, 4),\n                all_targets[2],\n                SmoothL1Loss(beta=1.),\n                **self.train_cfg.carl,\n                avg_factor=num_total_pos,\n                num_class=self.num_classes)\n\n        # check NaN and Inf\n        assert torch.isfinite(all_cls_scores).all().item(), \\\n            'classification scores become infinite or NaN!'\n        assert torch.isfinite(all_bbox_preds).all().item(), \\\n            'bbox predications become infinite or NaN!'\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_single,\n            all_cls_scores,\n            all_bbox_preds,\n            all_anchors,\n            all_labels,\n            all_label_weights,\n            all_bbox_targets,\n            all_bbox_weights,\n            num_total_samples=num_total_pos)\n        loss_dict = dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n        if carl_loss_cfg is not None:\n            loss_dict.update(loss_carl)\n        return loss_dict\n"
  },
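Unlike the Retina variant, `PISASSDHead.loss` keeps the image dimension when concatenating levels, so `multi_apply` can later iterate image by image; it also asserts that scores and box deltas stay finite before computing the losses. A short sketch of that layout and guard, with assumed sizes:

```python
# Sketch: per-image layout of multi-level scores plus a finiteness guard.
import torch

num_images, num_anchors, num_classes = 2, 4, 21
level_sizes = [(38, 38), (19, 19), (10, 10)]
cls_scores = [torch.randn(num_images, num_anchors * num_classes, h, w)
              for h, w in level_sizes]

all_cls_scores = torch.cat([
    s.permute(0, 2, 3, 1).reshape(num_images, -1, num_classes)
    for s in cls_scores
], 1)
print(all_cls_scores.shape)  # torch.Size([2, 7620, 21]) = 4*(1444+361+100) anchors/image

# Mirror of the head's sanity check before the loss computation.
assert torch.isfinite(all_cls_scores).all().item(), \
    'classification scores become infinite or NaN!'
```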
  {
    "path": "mmdet/models/dense_heads/reppoints_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import DeformConv2d\n\nfrom mmdet.core import (build_assigner, build_sampler, images_to_levels,\n                        multi_apply, unmap)\nfrom mmdet.core.anchor.point_generator import MlvlPointGenerator\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom ..builder import HEADS, build_loss\nfrom .anchor_free_head import AnchorFreeHead\n\n\n@HEADS.register_module()\nclass RepPointsHead(AnchorFreeHead):\n    \"\"\"RepPoint head.\n\n    Args:\n        point_feat_channels (int): Number of channels of points features.\n        gradient_mul (float): The multiplier to gradients from\n            points refinement and recognition.\n        point_strides (Iterable): points strides.\n        point_base_scale (int): bbox scale for assigning labels.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox_init (dict): Config of initial points loss.\n        loss_bbox_refine (dict): Config of points loss in refinement.\n        use_grid_points (bool): If we use bounding box representation, the\n        reppoints is represented as grid points on the bounding box.\n        center_init (bool): Whether to use center point assignment.\n        transform_method (str): The methods to transform RepPoints to bbox.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 point_feat_channels=256,\n                 num_points=9,\n                 gradient_mul=0.1,\n                 point_strides=[8, 16, 32, 64, 128],\n                 point_base_scale=4,\n                 loss_cls=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox_init=dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=0.5),\n                 loss_bbox_refine=dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0),\n                 use_grid_points=False,\n                 center_init=True,\n                 transform_method='moment',\n                 moment_mul=0.01,\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='reppoints_cls_out',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.num_points = num_points\n        self.point_feat_channels = point_feat_channels\n        self.use_grid_points = use_grid_points\n        self.center_init = center_init\n\n        # we use deform conv to extract points features\n        self.dcn_kernel = int(np.sqrt(num_points))\n        self.dcn_pad = int((self.dcn_kernel - 1) / 2)\n        assert self.dcn_kernel * self.dcn_kernel == num_points, \\\n            'The points number should be a square number.'\n        assert self.dcn_kernel % 2 == 1, \\\n            'The points number should be an odd square number.'\n        dcn_base = np.arange(-self.dcn_pad,\n                             self.dcn_pad + 1).astype(np.float64)\n        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)\n  
      dcn_base_x = np.tile(dcn_base, self.dcn_kernel)\n        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(\n            (-1))\n        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)\n\n        super().__init__(\n            num_classes,\n            in_channels,\n            loss_cls=loss_cls,\n            init_cfg=init_cfg,\n            **kwargs)\n\n        self.gradient_mul = gradient_mul\n        self.point_base_scale = point_base_scale\n        self.point_strides = point_strides\n        self.prior_generator = MlvlPointGenerator(\n            self.point_strides, offset=0.)\n\n        self.sampling = loss_cls['type'] not in ['FocalLoss']\n        if self.train_cfg:\n            self.init_assigner = build_assigner(self.train_cfg.init.assigner)\n            self.refine_assigner = build_assigner(\n                self.train_cfg.refine.assigner)\n            # use PseudoSampler when sampling is False\n            if self.sampling and hasattr(self.train_cfg, 'sampler'):\n                sampler_cfg = self.train_cfg.sampler\n            else:\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.transform_method = transform_method\n        if self.transform_method == 'moment':\n            self.moment_transfer = nn.Parameter(\n                data=torch.zeros(2), requires_grad=True)\n            self.moment_mul = moment_mul\n\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = self.num_classes\n        else:\n            self.cls_out_channels = self.num_classes + 1\n        self.loss_bbox_init = build_loss(loss_bbox_init)\n        self.loss_bbox_refine = build_loss(loss_bbox_refine)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        pts_out_dim = 4 if self.use_grid_points else 2 * self.num_points\n        self.reppoints_cls_conv = DeformConv2d(self.feat_channels,\n                                               self.point_feat_channels,\n                                               self.dcn_kernel, 1,\n                                               self.dcn_pad)\n        self.reppoints_cls_out = nn.Conv2d(self.point_feat_channels,\n                                           self.cls_out_channels, 1, 1, 0)\n        self.reppoints_pts_init_conv = nn.Conv2d(self.feat_channels,\n                                                 self.point_feat_channels, 3,\n                                                 1, 1)\n        self.reppoints_pts_init_out = nn.Conv2d(self.point_feat_channels,\n       
                                         pts_out_dim, 1, 1, 0)\n        self.reppoints_pts_refine_conv = DeformConv2d(self.feat_channels,\n                                                      self.point_feat_channels,\n                                                      self.dcn_kernel, 1,\n                                                      self.dcn_pad)\n        self.reppoints_pts_refine_out = nn.Conv2d(self.point_feat_channels,\n                                                  pts_out_dim, 1, 1, 0)\n\n    def points2bbox(self, pts, y_first=True):\n        \"\"\"Convert a point set into a bounding box.\n\n        :param pts: the input point sets (fields), each point\n            set (fields) is represented as 2n scalars.\n        :param y_first: if y_first=True, the point set is represented as\n            [y1, x1, y2, x2 ... yn, xn], otherwise the point set is\n            represented as [x1, y1, x2, y2 ... xn, yn].\n        :return: each point set is converted to a bbox [x1, y1, x2, y2].\n        \"\"\"\n        pts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])\n        pts_y = pts_reshape[:, :, 0, ...] if y_first else pts_reshape[:, :, 1,\n                                                                      ...]\n        pts_x = pts_reshape[:, :, 1, ...] if y_first else pts_reshape[:, :, 0,\n                                                                      ...]\n        if self.transform_method == 'minmax':\n            bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n            bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n            bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n                             dim=1)\n        elif self.transform_method == 'partial_minmax':\n            pts_y = pts_y[:, :4, ...]\n            pts_x = pts_x[:, :4, ...]\n            bbox_left = pts_x.min(dim=1, keepdim=True)[0]\n            bbox_right = pts_x.max(dim=1, keepdim=True)[0]\n            bbox_up = pts_y.min(dim=1, keepdim=True)[0]\n            bbox_bottom = pts_y.max(dim=1, keepdim=True)[0]\n            bbox = torch.cat([bbox_left, bbox_up, bbox_right, bbox_bottom],\n                             dim=1)\n        elif self.transform_method == 'moment':\n            pts_y_mean = pts_y.mean(dim=1, keepdim=True)\n            pts_x_mean = pts_x.mean(dim=1, keepdim=True)\n            pts_y_std = torch.std(pts_y - pts_y_mean, dim=1, keepdim=True)\n            pts_x_std = torch.std(pts_x - pts_x_mean, dim=1, keepdim=True)\n            moment_transfer = (self.moment_transfer * self.moment_mul) + (\n                self.moment_transfer.detach() * (1 - self.moment_mul))\n            moment_width_transfer = moment_transfer[0]\n            moment_height_transfer = moment_transfer[1]\n            half_width = pts_x_std * torch.exp(moment_width_transfer)\n            half_height = pts_y_std * torch.exp(moment_height_transfer)\n            bbox = torch.cat([\n                pts_x_mean - half_width, pts_y_mean - half_height,\n                pts_x_mean + half_width, pts_y_mean + half_height\n            ],\n                             dim=1)\n        else:\n            raise NotImplementedError\n        return bbox\n\n    def gen_grid_from_reg(self, reg, previous_boxes):\n        \"\"\"Based on the previous bboxes and regression values, we compute the\n        regressed bboxes and generate the grids on the bboxes.\n\n        :param reg: the 
regression value to previous bboxes.\n        :param previous_boxes: previous bboxes.\n        :return: generate grids on the regressed bboxes.\n        \"\"\"\n        b, _, h, w = reg.shape\n        bxy = (previous_boxes[:, :2, ...] + previous_boxes[:, 2:, ...]) / 2.\n        bwh = (previous_boxes[:, 2:, ...] -\n               previous_boxes[:, :2, ...]).clamp(min=1e-6)\n        grid_topleft = bxy + bwh * reg[:, :2, ...] - 0.5 * bwh * torch.exp(\n            reg[:, 2:, ...])\n        grid_wh = bwh * torch.exp(reg[:, 2:, ...])\n        grid_left = grid_topleft[:, [0], ...]\n        grid_top = grid_topleft[:, [1], ...]\n        grid_width = grid_wh[:, [0], ...]\n        grid_height = grid_wh[:, [1], ...]\n        intervel = torch.linspace(0., 1., self.dcn_kernel).view(\n            1, self.dcn_kernel, 1, 1).type_as(reg)\n        grid_x = grid_left + grid_width * intervel\n        grid_x = grid_x.unsqueeze(1).repeat(1, self.dcn_kernel, 1, 1, 1)\n        grid_x = grid_x.view(b, -1, h, w)\n        grid_y = grid_top + grid_height * intervel\n        grid_y = grid_y.unsqueeze(2).repeat(1, 1, self.dcn_kernel, 1, 1)\n        grid_y = grid_y.view(b, -1, h, w)\n        grid_yx = torch.stack([grid_y, grid_x], dim=2)\n        grid_yx = grid_yx.view(b, -1, h, w)\n        regressed_bbox = torch.cat([\n            grid_left, grid_top, grid_left + grid_width, grid_top + grid_height\n        ], 1)\n        return grid_yx, regressed_bbox\n\n    def forward(self, feats):\n        return multi_apply(self.forward_single, feats)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature map of a single FPN level.\"\"\"\n        dcn_base_offset = self.dcn_base_offset.type_as(x)\n        # If we use center_init, the initial reppoints is from center points.\n        # If we use bounding bbox representation, the initial reppoints is\n        #   from regular grid placed on a pre-defined bbox.\n        if self.use_grid_points or not self.center_init:\n            scale = self.point_base_scale / 2\n            points_init = dcn_base_offset / dcn_base_offset.max() * scale\n            bbox_init = x.new_tensor([-scale, -scale, scale,\n                                      scale]).view(1, 4, 1, 1)\n        else:\n            points_init = 0\n        cls_feat = x\n        pts_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            pts_feat = reg_conv(pts_feat)\n        # initialize reppoints\n        pts_out_init = self.reppoints_pts_init_out(\n            self.relu(self.reppoints_pts_init_conv(pts_feat)))\n        if self.use_grid_points:\n            pts_out_init, bbox_out_init = self.gen_grid_from_reg(\n                pts_out_init, bbox_init.detach())\n        else:\n            pts_out_init = pts_out_init + points_init\n        # refine and classify reppoints\n        pts_out_init_grad_mul = (1 - self.gradient_mul) * pts_out_init.detach(\n        ) + self.gradient_mul * pts_out_init\n        dcn_offset = pts_out_init_grad_mul - dcn_base_offset\n        cls_out = self.reppoints_cls_out(\n            self.relu(self.reppoints_cls_conv(cls_feat, dcn_offset)))\n        pts_out_refine = self.reppoints_pts_refine_out(\n            self.relu(self.reppoints_pts_refine_conv(pts_feat, dcn_offset)))\n        if self.use_grid_points:\n            pts_out_refine, bbox_out_refine = self.gen_grid_from_reg(\n                pts_out_refine, bbox_out_init.detach())\n        else:\n            pts_out_refine = pts_out_refine + 
pts_out_init.detach()\n\n        if self.training:\n            return cls_out, pts_out_init, pts_out_refine\n        else:\n            return cls_out, self.points2bbox(pts_out_refine)\n\n    def get_points(self, featmap_sizes, img_metas, device):\n        \"\"\"Get points according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n\n        Returns:\n            tuple: points of each image, valid flags of each image\n        \"\"\"\n        num_imgs = len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # points center for one time\n        multi_level_points = self.prior_generator.grid_priors(\n            featmap_sizes, device=device, with_stride=True)\n        points_list = [[point.clone() for point in multi_level_points]\n                       for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level grids\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(img_metas):\n            multi_level_flags = self.prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'])\n            valid_flag_list.append(multi_level_flags)\n\n        return points_list, valid_flag_list\n\n    def centers_to_bboxes(self, point_list):\n        \"\"\"Get bboxes according to center points.\n\n        Only used in :class:`MaxIoUAssigner`.\n        \"\"\"\n        bbox_list = []\n        for i_img, point in enumerate(point_list):\n            bbox = []\n            for i_lvl in range(len(self.point_strides)):\n                scale = self.point_base_scale * self.point_strides[i_lvl] * 0.5\n                bbox_shift = torch.Tensor([-scale, -scale, scale,\n                                           scale]).view(1, 4).type_as(point[0])\n                bbox_center = torch.cat(\n                    [point[i_lvl][:, :2], point[i_lvl][:, :2]], dim=1)\n                bbox.append(bbox_center + bbox_shift)\n            bbox_list.append(bbox)\n        return bbox_list\n\n    def offset_to_pts(self, center_list, pred_list):\n        \"\"\"Change from point offset to point coordinate.\"\"\"\n        pts_list = []\n        for i_lvl in range(len(self.point_strides)):\n            pts_lvl = []\n            for i_img in range(len(center_list)):\n                pts_center = center_list[i_img][i_lvl][:, :2].repeat(\n                    1, self.num_points)\n                pts_shift = pred_list[i_lvl][i_img]\n                yx_pts_shift = pts_shift.permute(1, 2, 0).view(\n                    -1, 2 * self.num_points)\n                y_pts_shift = yx_pts_shift[..., 0::2]\n                x_pts_shift = yx_pts_shift[..., 1::2]\n                xy_pts_shift = torch.stack([x_pts_shift, y_pts_shift], -1)\n                xy_pts_shift = xy_pts_shift.view(*yx_pts_shift.shape[:-1], -1)\n                pts = xy_pts_shift * self.point_strides[i_lvl] + pts_center\n                pts_lvl.append(pts)\n            pts_lvl = torch.stack(pts_lvl, 0)\n            pts_list.append(pts_lvl)\n        return pts_list\n\n    def _point_target_single(self,\n                             flat_proposals,\n                             valid_flags,\n                             gt_bboxes,\n                             gt_bboxes_ignore,\n                             gt_labels,\n                             stage='init',\n                             unmap_outputs=True):\n        inside_flags = valid_flags\n 
       if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample proposals\n        proposals = flat_proposals[inside_flags, :]\n\n        if stage == 'init':\n            assigner = self.init_assigner\n            pos_weight = self.train_cfg.init.pos_weight\n        else:\n            assigner = self.refine_assigner\n            pos_weight = self.train_cfg.refine.pos_weight\n        assign_result = assigner.assign(proposals, gt_bboxes, gt_bboxes_ignore,\n                                        None if self.sampling else gt_labels)\n        sampling_result = self.sampler.sample(assign_result, proposals,\n                                              gt_bboxes)\n\n        num_valid_proposals = proposals.shape[0]\n        bbox_gt = proposals.new_zeros([num_valid_proposals, 4])\n        pos_proposals = torch.zeros_like(proposals)\n        proposals_weights = proposals.new_zeros([num_valid_proposals, 4])\n        labels = proposals.new_full((num_valid_proposals, ),\n                                    self.num_classes,\n                                    dtype=torch.long)\n        label_weights = proposals.new_zeros(\n            num_valid_proposals, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            pos_gt_bboxes = sampling_result.pos_gt_bboxes\n            bbox_gt[pos_inds, :] = pos_gt_bboxes\n            pos_proposals[pos_inds, :] = proposals[pos_inds, :]\n            proposals_weights[pos_inds, :] = 1.0\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of proposals\n        if unmap_outputs:\n            num_total_proposals = flat_proposals.size(0)\n            labels = unmap(labels, num_total_proposals, inside_flags)\n            label_weights = unmap(label_weights, num_total_proposals,\n                                  inside_flags)\n            bbox_gt = unmap(bbox_gt, num_total_proposals, inside_flags)\n            pos_proposals = unmap(pos_proposals, num_total_proposals,\n                                  inside_flags)\n            proposals_weights = unmap(proposals_weights, num_total_proposals,\n                                      inside_flags)\n\n        return (labels, label_weights, bbox_gt, pos_proposals,\n                proposals_weights, pos_inds, neg_inds)\n\n    def get_targets(self,\n                    proposals_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    stage='init',\n                    label_channels=1,\n                    unmap_outputs=True):\n        \"\"\"Compute corresponding GT box and classification targets for\n        proposals.\n\n        Args:\n            proposals_list (list[list]): Multi level points/bboxes of each\n                image.\n            valid_flag_list (list[list]): Multi level valid flags of each\n                
image.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            stage (str): `init` or `refine`. Generate target for init stage or\n                refine stage.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple:\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each level.  # noqa: E501\n                - bbox_gt_list (list[Tensor]): Ground truth bbox of each level.\n                - proposal_list (list[Tensor]): Proposals(points/bboxes) of each level.  # noqa: E501\n                - proposal_weights_list (list[Tensor]): Proposal weights of each level.  # noqa: E501\n                - num_total_pos (int): Number of positive samples in all images.  # noqa: E501\n                - num_total_neg (int): Number of negative samples in all images.  # noqa: E501\n        \"\"\"\n        assert stage in ['init', 'refine']\n        num_imgs = len(img_metas)\n        assert len(proposals_list) == len(valid_flag_list) == num_imgs\n\n        # points number of multi levels\n        num_level_proposals = [points.size(0) for points in proposals_list[0]]\n\n        # concat all level points and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(proposals_list[i]) == len(valid_flag_list[i])\n            proposals_list[i] = torch.cat(proposals_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        (all_labels, all_label_weights, all_bbox_gt, all_proposals,\n         all_proposal_weights, pos_inds_list, neg_inds_list) = multi_apply(\n             self._point_target_single,\n             proposals_list,\n             valid_flag_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             stage=stage,\n             unmap_outputs=unmap_outputs)\n        # no valid points\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled points of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        labels_list = images_to_levels(all_labels, num_level_proposals)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_proposals)\n        bbox_gt_list = images_to_levels(all_bbox_gt, num_level_proposals)\n        proposals_list = images_to_levels(all_proposals, num_level_proposals)\n        proposal_weights_list = images_to_levels(all_proposal_weights,\n                                                 num_level_proposals)\n        return (labels_list, label_weights_list, bbox_gt_list, proposals_list,\n                proposal_weights_list, num_total_pos, num_total_neg)\n\n    def loss_single(self, cls_score, 
pts_pred_init, pts_pred_refine, labels,\n                    label_weights, bbox_gt_init, bbox_weights_init,\n                    bbox_gt_refine, bbox_weights_refine, stride,\n                    num_total_samples_init, num_total_samples_refine):\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        cls_score = cls_score.contiguous()\n        loss_cls = self.loss_cls(\n            cls_score,\n            labels,\n            label_weights,\n            avg_factor=num_total_samples_refine)\n\n        # points loss\n        bbox_gt_init = bbox_gt_init.reshape(-1, 4)\n        bbox_weights_init = bbox_weights_init.reshape(-1, 4)\n        bbox_pred_init = self.points2bbox(\n            pts_pred_init.reshape(-1, 2 * self.num_points), y_first=False)\n        bbox_gt_refine = bbox_gt_refine.reshape(-1, 4)\n        bbox_weights_refine = bbox_weights_refine.reshape(-1, 4)\n        bbox_pred_refine = self.points2bbox(\n            pts_pred_refine.reshape(-1, 2 * self.num_points), y_first=False)\n        normalize_term = self.point_base_scale * stride\n        loss_pts_init = self.loss_bbox_init(\n            bbox_pred_init / normalize_term,\n            bbox_gt_init / normalize_term,\n            bbox_weights_init,\n            avg_factor=num_total_samples_init)\n        loss_pts_refine = self.loss_bbox_refine(\n            bbox_pred_refine / normalize_term,\n            bbox_gt_refine / normalize_term,\n            bbox_weights_refine,\n            avg_factor=num_total_samples_refine)\n        return loss_cls, loss_pts_init, loss_pts_refine\n\n    def loss(self,\n             cls_scores,\n             pts_preds_init,\n             pts_preds_refine,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        device = cls_scores[0].device\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        # target for initial stage\n        center_list, valid_flag_list = self.get_points(featmap_sizes,\n                                                       img_metas, device)\n        pts_coordinate_preds_init = self.offset_to_pts(center_list,\n                                                       pts_preds_init)\n        if self.train_cfg.init.assigner['type'] == 'PointAssigner':\n            # Assign target for center list\n            candidate_list = center_list\n        else:\n            # transform center list to bbox list and\n            #   assign target for bbox list\n            bbox_list = self.centers_to_bboxes(center_list)\n            candidate_list = bbox_list\n        cls_reg_targets_init = self.get_targets(\n            candidate_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            stage='init',\n            label_channels=label_channels)\n        (*_, bbox_gt_list_init, candidate_list_init, bbox_weights_list_init,\n         num_total_pos_init, num_total_neg_init) = cls_reg_targets_init\n        num_total_samples_init = (\n            num_total_pos_init +\n            num_total_neg_init if self.sampling else num_total_pos_init)\n\n        # target for refinement stage\n        center_list, 
valid_flag_list = self.get_points(featmap_sizes,\n                                                       img_metas, device)\n        pts_coordinate_preds_refine = self.offset_to_pts(\n            center_list, pts_preds_refine)\n        bbox_list = []\n        for i_img, center in enumerate(center_list):\n            bbox = []\n            for i_lvl in range(len(pts_preds_refine)):\n                bbox_preds_init = self.points2bbox(\n                    pts_preds_init[i_lvl].detach())\n                bbox_shift = bbox_preds_init * self.point_strides[i_lvl]\n                bbox_center = torch.cat(\n                    [center[i_lvl][:, :2], center[i_lvl][:, :2]], dim=1)\n                bbox.append(bbox_center +\n                            bbox_shift[i_img].permute(1, 2, 0).reshape(-1, 4))\n            bbox_list.append(bbox)\n        cls_reg_targets_refine = self.get_targets(\n            bbox_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            stage='refine',\n            label_channels=label_channels)\n        (labels_list, label_weights_list, bbox_gt_list_refine,\n         candidate_list_refine, bbox_weights_list_refine, num_total_pos_refine,\n         num_total_neg_refine) = cls_reg_targets_refine\n        num_total_samples_refine = (\n            num_total_pos_refine +\n            num_total_neg_refine if self.sampling else num_total_pos_refine)\n\n        # compute loss\n        losses_cls, losses_pts_init, losses_pts_refine = multi_apply(\n            self.loss_single,\n            cls_scores,\n            pts_coordinate_preds_init,\n            pts_coordinate_preds_refine,\n            labels_list,\n            label_weights_list,\n            bbox_gt_list_init,\n            bbox_weights_list_init,\n            bbox_gt_list_refine,\n            bbox_weights_list_refine,\n            self.point_strides,\n            num_total_samples_init=num_total_samples_init,\n            num_total_samples_refine=num_total_samples_refine)\n        loss_dict_all = {\n            'loss_cls': losses_cls,\n            'loss_pts_init': losses_pts_init,\n            'loss_pts_refine': losses_pts_refine\n        }\n        return loss_dict_all\n\n    # Same as base_dense_head/_get_bboxes_single except self._bbox_decode\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. 
RepPoints head does not need\n                this value.\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid, has shape\n                (num_priors, 2).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_score_list) == len(bbox_pred_list)\n        img_shape = img_meta['img_shape']\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for level_idx, (cls_score, bbox_pred, priors) in enumerate(\n                zip(cls_score_list, bbox_pred_list, mlvl_priors)):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)[:, :-1]\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, _, filtered_results = results\n\n            bbox_pred = filtered_results['bbox_pred']\n            priors = filtered_results['priors']\n\n            bboxes = self._bbox_decode(priors, bbox_pred,\n                                       self.point_strides[level_idx],\n                                       img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        return self._bbox_post_process(\n            mlvl_scores,\n            mlvl_labels,\n            mlvl_bboxes,\n            img_meta['scale_factor'],\n            cfg,\n            rescale=rescale,\n            with_nms=with_nms)\n\n    def _bbox_decode(self, points, bbox_pred, stride, max_shape):\n        bbox_pos_center = torch.cat([points[:, :2], points[:, :2]], dim=1)\n        bboxes = bbox_pred * stride + bbox_pos_center\n        x1 = bboxes[:, 0].clamp(min=0, max=max_shape[1])\n        y1 = bboxes[:, 1].clamp(min=0, max=max_shape[0])\n        x2 = bboxes[:, 2].clamp(min=0, max=max_shape[1])\n        y2 = bboxes[:, 3].clamp(min=0, max=max_shape[0])\n        decoded_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n        return decoded_bboxes\n"
  },
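  {
    "path": "examples/reppoints_points2bbox_sketch.py",
    "content": "# NOTE: illustrative sketch added for documentation purposes; this file and its\n# path are hypothetical and not part of the upstream mmdetection sources.\n# It re-implements, standalone, the 'minmax' branch of RepPointsHead.points2bbox\n# (mmdet/models/dense_heads/reppoints_head.py) so the point-set -> bbox\n# conversion can be inspected without building the full head (which needs DCN).\nimport torch\n\nnum_points = 9\n# predictions follow the head's layout: (N, 2 * num_points, H, W), y first\npts = torch.randn(2, 2 * num_points, 4, 4)\npts_reshape = pts.view(pts.shape[0], -1, 2, *pts.shape[2:])\npts_y = pts_reshape[:, :, 0, ...]\npts_x = pts_reshape[:, :, 1, ...]\n# 'minmax' transform: the bbox is the tightest box around the learned points\nbbox = torch.cat([\n    pts_x.min(dim=1, keepdim=True)[0], pts_y.min(dim=1, keepdim=True)[0],\n    pts_x.max(dim=1, keepdim=True)[0], pts_y.max(dim=1, keepdim=True)[0]\n], dim=1)\nassert bbox.shape == (2, 4, 4, 4)  # [x1, y1, x2, y2] per spatial location\n"
  },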
  {
    "path": "mmdet/models/dense_heads/retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom ..builder import HEADS\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass RetinaHead(AnchorHead):\n    r\"\"\"An anchor-based head used in `RetinaNet\n    <https://arxiv.org/pdf/1708.02002.pdf>`_.\n\n    The head contains two subnetworks. The first classifies anchor boxes and\n    the second regresses deltas for the anchors.\n\n    Example:\n        >>> import torch\n        >>> self = RetinaHead(11, 7)\n        >>> x = torch.rand(1, 7, 32, 32)\n        >>> cls_score, bbox_pred = self.forward_single(x)\n        >>> # Each anchor predicts a score for each class except background\n        >>> cls_per_anchor = cls_score.shape[1] / self.num_anchors\n        >>> box_per_anchor = bbox_pred.shape[1] / self.num_anchors\n        >>> assert cls_per_anchor == (self.num_classes)\n        >>> assert box_per_anchor == 4\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=4,\n                     scales_per_octave=3,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='retina_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super(RetinaHead, self).__init__(\n            num_classes,\n            in_channels,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.retina_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.retina_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            
tuple:\n                cls_score (Tensor): Cls scores for a single scale level\n                    the channels number is num_anchors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale\n                    level, the channels number is num_anchors * 4.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.retina_cls(cls_feat)\n        bbox_pred = self.retina_reg(reg_feat)\n        return cls_score, bbox_pred\n"
  },
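  {
    "path": "examples/retina_head_forward_sketch.py",
    "content": "# NOTE: illustrative sketch added for documentation purposes; this file and its\n# path are hypothetical and not part of the upstream mmdetection sources.\n# It expands the Example block from RetinaHead's docstring: build a head with\n# 11 classes and 7 input channels, push one FPN level through forward_single,\n# and check the per-anchor output layout.\nimport torch\n\nfrom mmdet.models.dense_heads import RetinaHead\n\nhead = RetinaHead(11, 7)\nx = torch.rand(1, 7, 32, 32)\ncls_score, bbox_pred = head.forward_single(x)\n# with the default sigmoid-based classification, each anchor predicts one\n# score per class and four box deltas\nassert cls_score.shape[1] == head.num_base_priors * head.num_classes\nassert bbox_pred.shape[1] == head.num_base_priors * 4\n"
  },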
  {
    "path": "mmdet/models/dense_heads/retina_sepbn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, bias_init_with_prob, normal_init\n\nfrom ..builder import HEADS\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass RetinaSepBNHead(AnchorHead):\n    \"\"\"\"RetinaHead with separate BN.\n\n    In RetinaHead, conv/norm layers are shared across different FPN levels,\n    while in RetinaSepBNHead, conv layers are shared across different FPN\n    levels, but BN layers are separated.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 num_ins,\n                 in_channels,\n                 stacked_convs=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.num_ins = num_ins\n        super(RetinaSepBNHead, self).__init__(\n            num_classes, in_channels, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.num_ins):\n            cls_convs = nn.ModuleList()\n            reg_convs = nn.ModuleList()\n            for i in range(self.stacked_convs):\n                chn = self.in_channels if i == 0 else self.feat_channels\n                cls_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n                reg_convs.append(\n                    ConvModule(\n                        chn,\n                        self.feat_channels,\n                        3,\n                        stride=1,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n            self.cls_convs.append(cls_convs)\n            self.reg_convs.append(reg_convs)\n        for i in range(self.stacked_convs):\n            for j in range(1, self.num_ins):\n                self.cls_convs[j][i].conv = self.cls_convs[0][i].conv\n                self.reg_convs[j][i].conv = self.reg_convs[0][i].conv\n        self.retina_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.retina_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\"\"\"\n        super(RetinaSepBNHead, self).init_weights()\n        for m in self.cls_convs[0]:\n            normal_init(m.conv, std=0.01)\n        for m in self.reg_convs[0]:\n            normal_init(m.conv, std=0.01)\n        bias_cls = bias_init_with_prob(0.01)\n        normal_init(self.retina_cls, std=0.01, bias=bias_cls)\n        normal_init(self.retina_reg, std=0.01)\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats 
(tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox prediction\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * 4.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for i, x in enumerate(feats):\n            cls_feat = feats[i]\n            reg_feat = feats[i]\n            for cls_conv in self.cls_convs[i]:\n                cls_feat = cls_conv(cls_feat)\n            for reg_conv in self.reg_convs[i]:\n                reg_feat = reg_conv(reg_feat)\n            cls_score = self.retina_cls(cls_feat)\n            bbox_pred = self.retina_reg(reg_feat)\n            cls_scores.append(cls_score)\n            bbox_preds.append(bbox_pred)\n        return cls_scores, bbox_preds\n"
  },
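  {
    "path": "examples/retina_sepbn_weight_sharing_sketch.py",
    "content": "# NOTE: illustrative sketch added for documentation purposes; this file and its\n# path are hypothetical and not part of the upstream mmdetection sources.\n# It isolates the parameter-sharing trick from RetinaSepBNHead._init_layers:\n# assigning `level1.conv = level0.conv` makes two ConvModules share the same\n# convolution weights while each keeps its own BN layer, mirroring how conv\n# weights are shared across FPN levels but BN statistics stay per level.\nfrom mmcv.cnn import ConvModule\n\nlevel0 = ConvModule(256, 256, 3, padding=1, norm_cfg=dict(type='BN'))\nlevel1 = ConvModule(256, 256, 3, padding=1, norm_cfg=dict(type='BN'))\nlevel1.conv = level0.conv  # share conv weights across levels\nassert level1.conv.weight is level0.conv.weight\nassert level1.norm is not level0.norm  # BN statistics stay level-specific\n"
  },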
  {
    "path": "mmdet/models/dense_heads/rpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import batched_nms\n\nfrom ..builder import HEADS\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass RPNHead(AnchorHead):\n    \"\"\"RPN head.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n        num_convs (int): Number of convolution layers in the head. Default 1.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 in_channels,\n                 init_cfg=dict(type='Normal', layer='Conv2d', std=0.01),\n                 num_convs=1,\n                 **kwargs):\n        self.num_convs = num_convs\n        super(RPNHead, self).__init__(\n            1, in_channels, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        if self.num_convs > 1:\n            rpn_convs = []\n            for i in range(self.num_convs):\n                if i == 0:\n                    in_channels = self.in_channels\n                else:\n                    in_channels = self.feat_channels\n                # use ``inplace=False`` to avoid error: one of the variables\n                # needed for gradient computation has been modified by an\n                # inplace operation.\n                rpn_convs.append(\n                    ConvModule(\n                        in_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        inplace=False))\n            self.rpn_conv = nn.Sequential(*rpn_convs)\n        else:\n            self.rpn_conv = nn.Conv2d(\n                self.in_channels, self.feat_channels, 3, padding=1)\n        self.rpn_cls = nn.Conv2d(self.feat_channels,\n                                 self.num_base_priors * self.cls_out_channels,\n                                 1)\n        self.rpn_reg = nn.Conv2d(self.feat_channels, self.num_base_priors * 4,\n                                 1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature map of a single scale level.\"\"\"\n        x = self.rpn_conv(x)\n        x = F.relu(x, inplace=False)\n        rpn_cls_score = self.rpn_cls(x)\n        rpn_bbox_pred = self.rpn_reg(x)\n        return rpn_cls_score, rpn_bbox_pred\n\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        losses = 
super(RPNHead, self).loss(\n            cls_scores,\n            bbox_preds,\n            gt_bboxes,\n            None,\n            img_metas,\n            gt_bboxes_ignore=gt_bboxes_ignore)\n        return dict(\n            loss_rpn_cls=losses['loss_cls'], loss_rpn_bbox=losses['loss_bbox'])\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           score_factor_list,\n                           mlvl_anchors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_anchors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has\n                shape (num_anchors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image. RPN head does not need this value.\n            mlvl_anchors (list[Tensor]): Anchors of all scale level\n                each item has shape (num_anchors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            Tensor: Labeled boxes in shape (n, 5), where the first 4 columns\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1.\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        cfg = copy.deepcopy(cfg)\n        img_shape = img_meta['img_shape']\n\n        # bboxes from different level should be independent during NMS,\n        # level_ids are used as labels for batched NMS to separate them\n        level_ids = []\n        mlvl_scores = []\n        mlvl_bbox_preds = []\n        mlvl_valid_anchors = []\n        nms_pre = cfg.get('nms_pre', -1)\n        for level_idx in range(len(cls_score_list)):\n            rpn_cls_score = cls_score_list[level_idx]\n            rpn_bbox_pred = bbox_pred_list[level_idx]\n            assert rpn_cls_score.size()[-2:] == rpn_bbox_pred.size()[-2:]\n            rpn_cls_score = rpn_cls_score.permute(1, 2, 0)\n            if self.use_sigmoid_cls:\n                rpn_cls_score = rpn_cls_score.reshape(-1)\n                scores = rpn_cls_score.sigmoid()\n            else:\n                rpn_cls_score = rpn_cls_score.reshape(-1, 2)\n                # We set FG labels to [0, num_class-1] and BG label to\n                # num_class in RPN head since mmdet v2.5, which is unified to\n                # be consistent with other head since mmdet v2.0. 
In mmdet v2.0\n                # to v2.4 we keep BG label as 0 and FG label as 1 in rpn head.\n                scores = rpn_cls_score.softmax(dim=1)[:, 0]\n            rpn_bbox_pred = rpn_bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n\n            anchors = mlvl_anchors[level_idx]\n            if 0 < nms_pre < scores.shape[0]:\n                # sort is faster than topk\n                # _, topk_inds = scores.topk(cfg.nms_pre)\n                ranked_scores, rank_inds = scores.sort(descending=True)\n                topk_inds = rank_inds[:nms_pre]\n                scores = ranked_scores[:nms_pre]\n                rpn_bbox_pred = rpn_bbox_pred[topk_inds, :]\n                anchors = anchors[topk_inds, :]\n\n            mlvl_scores.append(scores)\n            mlvl_bbox_preds.append(rpn_bbox_pred)\n            mlvl_valid_anchors.append(anchors)\n            level_ids.append(\n                scores.new_full((scores.size(0), ),\n                                level_idx,\n                                dtype=torch.long))\n\n        return self._bbox_post_process(mlvl_scores, mlvl_bbox_preds,\n                                       mlvl_valid_anchors, level_ids, cfg,\n                                       img_shape)\n\n    def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anchors,\n                           level_ids, cfg, img_shape, **kwargs):\n        \"\"\"bbox post-processing method.\n\n        Do the nms operation for bboxes in same level.\n\n        Args:\n            mlvl_scores (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_bboxes, ).\n            mlvl_bboxes (list[Tensor]): Decoded bboxes from all scale\n                levels of a single image, each item has shape (num_bboxes, 4).\n            mlvl_valid_anchors (list[Tensor]): Anchors of all scale level\n                each item has shape (num_bboxes, 4).\n            level_ids (list[Tensor]): Indexes from all scale levels of a\n                single image, each item has shape (num_bboxes, ).\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, `self.test_cfg` would be used.\n            img_shape (tuple(int)): The shape of model's input image.\n\n        Returns:\n            Tensor: Labeled boxes in shape (n, 5), where the first 4 columns\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1.\n        \"\"\"\n        scores = torch.cat(mlvl_scores)\n        anchors = torch.cat(mlvl_valid_anchors)\n        rpn_bbox_pred = torch.cat(mlvl_bboxes)\n        proposals = self.bbox_coder.decode(\n            anchors, rpn_bbox_pred, max_shape=img_shape)\n        ids = torch.cat(level_ids)\n\n        if cfg.min_bbox_size >= 0:\n            w = proposals[:, 2] - proposals[:, 0]\n            h = proposals[:, 3] - proposals[:, 1]\n            valid_mask = (w > cfg.min_bbox_size) & (h > cfg.min_bbox_size)\n            if not valid_mask.all():\n                proposals = proposals[valid_mask]\n                scores = scores[valid_mask]\n                ids = ids[valid_mask]\n\n        if proposals.numel() > 0:\n            dets, _ = batched_nms(proposals, scores, ids, cfg.nms)\n        else:\n            return proposals.new_zeros(0, 5)\n\n        return dets[:cfg.max_per_img]\n\n    def onnx_export(self, x, img_metas):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Features from the 
upstream network, each is\n                a 4D-tensor.\n            img_metas (list[dict]): Meta info of each image.\n        Returns:\n            Tensor: dets of shape [N, num_det, 5].\n        \"\"\"\n        cls_scores, bbox_preds = self(x)\n\n        assert len(cls_scores) == len(bbox_preds)\n\n        batch_bboxes, batch_scores = super(RPNHead, self).onnx_export(\n            cls_scores, bbox_preds, img_metas=img_metas, with_nms=False)\n        # Use ONNX::NonMaxSuppression in deployment\n        from mmdet.core.export import add_dummy_nms_for_onnx\n        cfg = copy.deepcopy(self.test_cfg)\n        score_threshold = cfg.nms.get('score_thr', 0.0)\n        nms_pre = cfg.get('deploy_nms_pre', -1)\n        # Different from the normal forward doing NMS level by level,\n        # we do NMS across all levels when exporting ONNX.\n        dets, _ = add_dummy_nms_for_onnx(batch_bboxes, batch_scores,\n                                         cfg.max_per_img,\n                                         cfg.nms.iou_threshold,\n                                         score_threshold, nms_pre,\n                                         cfg.max_per_img)\n        return dets\n"
  },
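  {
    "path": "examples/rpn_level_wise_nms_sketch.py",
    "content": "# NOTE: illustrative sketch added for documentation purposes; this file and its\n# path are hypothetical and not part of the upstream mmdetection sources.\n# It demonstrates the trick used in RPNHead._bbox_post_process: the FPN level\n# index of every proposal is passed to batched_nms as its 'class' id, so\n# proposals coming from different levels never suppress each other.\nimport torch\nfrom mmcv.ops import batched_nms\n\nproposals = torch.tensor([[0., 0., 10., 10.],\n                          [1., 1., 11., 11.],\n                          [0., 0., 10., 10.]])\nscores = torch.tensor([0.9, 0.8, 0.7])\nlevel_ids = torch.tensor([0, 0, 1])  # the last box comes from another level\ndets, keep = batched_nms(proposals, scores, level_ids,\n                         dict(type='nms', iou_threshold=0.5))\n# box 1 is suppressed by box 0 (same level, IoU > 0.5); box 2 survives because\n# NMS runs independently per level id\nassert keep.numel() == 2\n"
  },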
  {
    "path": "mmdet/models/dense_heads/sabl_retina_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (build_assigner, build_bbox_coder,\n                        build_prior_generator, build_sampler, images_to_levels,\n                        multi_apply, unmap)\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom ..builder import HEADS, build_loss\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\nfrom .guided_anchor_head import GuidedAnchorHead\n\n\n@HEADS.register_module()\nclass SABLRetinaHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"Side-Aware Boundary Localization (SABL) for RetinaNet.\n\n    The anchor generation, assigning and sampling in SABLRetinaHead\n    are the same as GuidedAnchorHead for guided anchoring.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        num_classes (int): Number of classes.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of Convs for classification \\\n            and regression branches. Defaults to 4.\n        feat_channels (int): Number of hidden channels. \\\n            Defaults to 256.\n        approx_anchor_generator (dict): Config dict for approx generator.\n        square_anchor_generator (dict): Config dict for square generator.\n        conv_cfg (dict): Config dict for ConvModule. Defaults to None.\n        norm_cfg (dict): Config dict for Norm Layer. Defaults to None.\n        bbox_coder (dict): Config dict for bbox coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. 
It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        train_cfg (dict): Training config of SABLRetinaHead.\n        test_cfg (dict): Testing config of SABLRetinaHead.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox_cls (dict): Config of classification loss for bbox branch.\n        loss_bbox_reg (dict): Config of regression loss for bbox branch.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 stacked_convs=4,\n                 feat_channels=256,\n                 approx_anchor_generator=dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=4,\n                     scales_per_octave=3,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 square_anchor_generator=dict(\n                     type='AnchorGenerator',\n                     ratios=[1.0],\n                     scales=[4],\n                     strides=[8, 16, 32, 64, 128]),\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 bbox_coder=dict(\n                     type='BucketingBBoxCoder',\n                     num_buckets=14,\n                     scale_factor=3.0),\n                 reg_decoded_bbox=False,\n                 train_cfg=None,\n                 test_cfg=None,\n                 loss_cls=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 loss_bbox_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.5),\n                 loss_bbox_reg=dict(\n                     type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.5),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='retina_cls',\n                         std=0.01,\n                         bias_prob=0.01))):\n        super(SABLRetinaHead, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.feat_channels = feat_channels\n        self.num_buckets = bbox_coder['num_buckets']\n        self.side_num = int(np.ceil(self.num_buckets / 2))\n\n        assert (approx_anchor_generator['octave_base_scale'] ==\n                square_anchor_generator['scales'][0])\n        assert (approx_anchor_generator['strides'] ==\n                square_anchor_generator['strides'])\n\n        self.approx_anchor_generator = build_prior_generator(\n            approx_anchor_generator)\n        self.square_anchor_generator = build_prior_generator(\n            square_anchor_generator)\n        self.approxs_per_octave = (\n            self.approx_anchor_generator.num_base_priors[0])\n\n        # one anchor per location\n        self.num_base_priors = self.square_anchor_generator.num_base_priors[0]\n\n        self.stacked_convs = stacked_convs\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.reg_decoded_bbox = reg_decoded_bbox\n\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n        
self.sampling = loss_cls['type'] not in [\n            'FocalLoss', 'GHMC', 'QualityFocalLoss'\n        ]\n        if self.use_sigmoid_cls:\n            self.cls_out_channels = num_classes\n        else:\n            self.cls_out_channels = num_classes + 1\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox_cls = build_loss(loss_bbox_cls)\n        self.loss_bbox_reg = build_loss(loss_bbox_reg)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # use PseudoSampler when sampling is False\n            if self.sampling and hasattr(self.train_cfg, 'sampler'):\n                sampler_cfg = self.train_cfg.sampler\n            else:\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n\n        self.fp16_enabled = False\n        self._init_layers()\n\n    @property\n    def num_anchors(self):\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'please use \"num_base_priors\" instead')\n        return self.square_anchor_generator.num_base_priors[0]\n\n    def _init_layers(self):\n        self.relu = nn.ReLU(inplace=True)\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n            self.reg_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.retina_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n        self.retina_bbox_reg = nn.Conv2d(\n            self.feat_channels, self.side_num * 4, 3, padding=1)\n        self.retina_bbox_cls = nn.Conv2d(\n            self.feat_channels, self.side_num * 4, 3, padding=1)\n\n    def forward_single(self, x):\n        cls_feat = x\n        reg_feat = x\n        for cls_conv in self.cls_convs:\n            cls_feat = cls_conv(cls_feat)\n        for reg_conv in self.reg_convs:\n            reg_feat = reg_conv(reg_feat)\n        cls_score = self.retina_cls(cls_feat)\n        bbox_cls_pred = self.retina_bbox_cls(reg_feat)\n        bbox_reg_pred = self.retina_bbox_reg(reg_feat)\n        bbox_pred = (bbox_cls_pred, bbox_reg_pred)\n        return cls_score, bbox_pred\n\n    def forward(self, feats):\n        return multi_apply(self.forward_single, feats)\n\n    def get_anchors(self, featmap_sizes, img_metas, device='cuda'):\n        \"\"\"Get squares according to feature map sizes and guided anchors.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            device (torch.device | str): device for returned tensors\n\n        Returns:\n            tuple: square approxs of each image\n        \"\"\"\n        num_imgs 
= len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # squares for one time\n        multi_level_squares = self.square_anchor_generator.grid_priors(\n            featmap_sizes, device=device)\n        squares_list = [multi_level_squares for _ in range(num_imgs)]\n\n        return squares_list\n\n    def get_target(self,\n                   approx_list,\n                   inside_flag_list,\n                   square_list,\n                   gt_bboxes_list,\n                   img_metas,\n                   gt_bboxes_ignore_list=None,\n                   gt_labels_list=None,\n                   label_channels=None,\n                   sampling=True,\n                   unmap_outputs=True):\n        \"\"\"Compute bucketing targets.\n        Args:\n            approx_list (list[list]): Multi level approxs of each image.\n            inside_flag_list (list[list]): Multi level inside flags of each\n                image.\n            square_list (list[list]): Multi level squares of each image.\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): ignore list of gt bboxes.\n            gt_labels_list (list[Tensor]): Gt labels of each image.\n            label_channels (int): Channel of label.\n            sampling (bool): Sample Anchors or not.\n            unmap_outputs (bool): unmap outputs or not.\n\n        Returns:\n            tuple: Returns a tuple containing learning targets.\n\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each \\\n                    level.\n                - bbox_cls_targets_list (list[Tensor]): BBox cls targets of \\\n                    each level.\n                - bbox_cls_weights_list (list[Tensor]): BBox cls weights of \\\n                    each level.\n                - bbox_reg_targets_list (list[Tensor]): BBox reg targets of \\\n                    each level.\n                - bbox_reg_weights_list (list[Tensor]): BBox reg weights of \\\n                    each level.\n                - num_total_pos (int): Number of positive samples in all \\\n                    images.\n                - num_total_neg (int): Number of negative samples in all \\\n                    images.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(approx_list) == len(inside_flag_list) == len(\n            square_list) == num_imgs\n        # anchor number of multi levels\n        num_level_squares = [squares.size(0) for squares in square_list[0]]\n        # concat all level anchors and flags to a single tensor\n        inside_flag_flat_list = []\n        approx_flat_list = []\n        square_flat_list = []\n        for i in range(num_imgs):\n            assert len(square_list[i]) == len(inside_flag_list[i])\n            inside_flag_flat_list.append(torch.cat(inside_flag_list[i]))\n            approx_flat_list.append(torch.cat(approx_list[i]))\n            square_flat_list.append(torch.cat(square_list[i]))\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        (all_labels, all_label_weights, all_bbox_cls_targets,\n         all_bbox_cls_weights, all_bbox_reg_targets, 
all_bbox_reg_weights,\n         pos_inds_list, neg_inds_list) = multi_apply(\n             self._get_target_single,\n             approx_flat_list,\n             inside_flag_flat_list,\n             square_flat_list,\n             gt_bboxes_list,\n             gt_bboxes_ignore_list,\n             gt_labels_list,\n             img_metas,\n             label_channels=label_channels,\n             sampling=sampling,\n             unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n        # split targets to a list w.r.t. multiple levels\n        labels_list = images_to_levels(all_labels, num_level_squares)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_squares)\n        bbox_cls_targets_list = images_to_levels(all_bbox_cls_targets,\n                                                 num_level_squares)\n        bbox_cls_weights_list = images_to_levels(all_bbox_cls_weights,\n                                                 num_level_squares)\n        bbox_reg_targets_list = images_to_levels(all_bbox_reg_targets,\n                                                 num_level_squares)\n        bbox_reg_weights_list = images_to_levels(all_bbox_reg_weights,\n                                                 num_level_squares)\n        return (labels_list, label_weights_list, bbox_cls_targets_list,\n                bbox_cls_weights_list, bbox_reg_targets_list,\n                bbox_reg_weights_list, num_total_pos, num_total_neg)\n\n    def _get_target_single(self,\n                           flat_approxs,\n                           inside_flags,\n                           flat_squares,\n                           gt_bboxes,\n                           gt_bboxes_ignore,\n                           gt_labels,\n                           img_meta,\n                           label_channels=None,\n                           sampling=True,\n                           unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            flat_approxs (Tensor): flat approxs of a single image,\n                shape (n, 4)\n            inside_flags (Tensor): inside flags of a single image,\n                shape (n, ).\n            flat_squares (Tensor): flat squares of a single image,\n                shape (approxs_per_octave * n, 4)\n            gt_bboxes (Tensor): Ground truth bboxes of a single image, \\\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            img_meta (dict): Meta info of the image.\n            label_channels (int): Channel of label.\n            sampling (bool): Sample Anchors or not.\n            unmap_outputs (bool): unmap outputs or not.\n\n        Returns:\n            tuple:\n\n                - labels_list (Tensor): Labels in a single image\n                - label_weights (Tensor): Label weights in a single image\n                - bbox_cls_targets (Tensor): BBox cls targets in a single image\n                - bbox_cls_weights 
(Tensor): BBox cls weights in a single image\n                - bbox_reg_targets (Tensor): BBox reg targets in a single image\n                - bbox_reg_weights (Tensor): BBox reg weights in a single image\n                - num_total_pos (int): Number of positive samples \\\n                    in a single image\n                - num_total_neg (int): Number of negative samples \\\n                    in a single image\n        \"\"\"\n        if not inside_flags.any():\n            return (None, ) * 8\n        # assign gt and sample anchors\n        expand_inside_flags = inside_flags[:, None].expand(\n            -1, self.approxs_per_octave).reshape(-1)\n        approxs = flat_approxs[expand_inside_flags, :]\n        squares = flat_squares[inside_flags, :]\n\n        assign_result = self.assigner.assign(approxs, squares,\n                                             self.approxs_per_octave,\n                                             gt_bboxes, gt_bboxes_ignore)\n        sampling_result = self.sampler.sample(assign_result, squares,\n                                              gt_bboxes)\n\n        num_valid_squares = squares.shape[0]\n        bbox_cls_targets = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_cls_weights = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_reg_targets = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        bbox_reg_weights = squares.new_zeros(\n            (num_valid_squares, self.side_num * 4))\n        labels = squares.new_full((num_valid_squares, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = squares.new_zeros(num_valid_squares, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            (pos_bbox_reg_targets, pos_bbox_reg_weights, pos_bbox_cls_targets,\n             pos_bbox_cls_weights) = self.bbox_coder.encode(\n                 sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes)\n\n            bbox_cls_targets[pos_inds, :] = pos_bbox_cls_targets\n            bbox_reg_targets[pos_inds, :] = pos_bbox_reg_targets\n            bbox_cls_weights[pos_inds, :] = pos_bbox_cls_weights\n            bbox_reg_weights[pos_inds, :] = pos_bbox_reg_weights\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_squares.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_cls_targets = unmap(bbox_cls_targets, num_total_anchors,\n                                     inside_flags)\n            bbox_cls_weights = unmap(bbox_cls_weights, num_total_anchors,\n                            
         inside_flags)\n            bbox_reg_targets = unmap(bbox_reg_targets, num_total_anchors,\n                                     inside_flags)\n            bbox_reg_weights = unmap(bbox_reg_weights, num_total_anchors,\n                                     inside_flags)\n        return (labels, label_weights, bbox_cls_targets, bbox_cls_weights,\n                bbox_reg_targets, bbox_reg_weights, pos_inds, neg_inds)\n\n    def loss_single(self, cls_score, bbox_pred, labels, label_weights,\n                    bbox_cls_targets, bbox_cls_weights, bbox_reg_targets,\n                    bbox_reg_weights, num_total_samples):\n        # classification loss\n        labels = labels.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        cls_score = cls_score.permute(0, 2, 3,\n                                      1).reshape(-1, self.cls_out_channels)\n        loss_cls = self.loss_cls(\n            cls_score, labels, label_weights, avg_factor=num_total_samples)\n        # regression loss\n        bbox_cls_targets = bbox_cls_targets.reshape(-1, self.side_num * 4)\n        bbox_cls_weights = bbox_cls_weights.reshape(-1, self.side_num * 4)\n        bbox_reg_targets = bbox_reg_targets.reshape(-1, self.side_num * 4)\n        bbox_reg_weights = bbox_reg_weights.reshape(-1, self.side_num * 4)\n        (bbox_cls_pred, bbox_reg_pred) = bbox_pred\n        bbox_cls_pred = bbox_cls_pred.permute(0, 2, 3, 1).reshape(\n            -1, self.side_num * 4)\n        bbox_reg_pred = bbox_reg_pred.permute(0, 2, 3, 1).reshape(\n            -1, self.side_num * 4)\n        loss_bbox_cls = self.loss_bbox_cls(\n            bbox_cls_pred,\n            bbox_cls_targets.long(),\n            bbox_cls_weights,\n            avg_factor=num_total_samples * 4 * self.side_num)\n        loss_bbox_reg = self.loss_bbox_reg(\n            bbox_reg_pred,\n            bbox_reg_targets,\n            bbox_reg_weights,\n            avg_factor=num_total_samples * 4 * self.bbox_coder.offset_topk)\n        return loss_cls, loss_bbox_cls, loss_bbox_reg\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.approx_anchor_generator.num_levels\n\n        device = cls_scores[0].device\n\n        # get sampled approxes\n        approxs_list, inside_flag_list = GuidedAnchorHead.get_sampled_approxs(\n            self, featmap_sizes, img_metas, device=device)\n\n        square_list = self.get_anchors(featmap_sizes, img_metas, device=device)\n\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        cls_reg_targets = self.get_target(\n            approxs_list,\n            inside_flag_list,\n            square_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n            sampling=self.sampling)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_cls_targets_list,\n         bbox_cls_weights_list, bbox_reg_targets_list, bbox_reg_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n        num_total_samples = (\n            num_total_pos + num_total_neg if self.sampling else num_total_pos)\n 
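       # compute cls, bucket-cls and bucket-reg losses for each feature level\n 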
       losses_cls, losses_bbox_cls, losses_bbox_reg = multi_apply(\n            self.loss_single,\n            cls_scores,\n            bbox_preds,\n            labels_list,\n            label_weights_list,\n            bbox_cls_targets_list,\n            bbox_cls_weights_list,\n            bbox_reg_targets_list,\n            bbox_reg_weights_list,\n            num_total_samples=num_total_samples)\n        return dict(\n            loss_cls=losses_cls,\n            loss_bbox_cls=losses_bbox_cls,\n            loss_bbox_reg=losses_bbox_reg)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def get_bboxes(self,\n                   cls_scores,\n                   bbox_preds,\n                   img_metas,\n                   cfg=None,\n                   rescale=False):\n        assert len(cls_scores) == len(bbox_preds)\n        num_levels = len(cls_scores)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n\n        device = cls_scores[0].device\n        mlvl_anchors = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_score_list = [\n                cls_scores[i][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_cls_pred_list = [\n                bbox_preds[i][0][img_id].detach() for i in range(num_levels)\n            ]\n            bbox_reg_pred_list = [\n                bbox_preds[i][1][img_id].detach() for i in range(num_levels)\n            ]\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            proposals = self._get_bboxes_single(\n                cls_score_list, bbox_cls_pred_list, bbox_reg_pred_list,\n                mlvl_anchors[img_id], img_shape, scale_factor, cfg, rescale)\n            result_list.append(proposals)\n        return result_list\n\n    def _get_bboxes_single(self,\n                           cls_scores,\n                           bbox_cls_preds,\n                           bbox_reg_preds,\n                           mlvl_anchors,\n                           img_shape,\n                           scale_factor,\n                           cfg,\n                           rescale=False):\n        cfg = self.test_cfg if cfg is None else cfg\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_confids = []\n        mlvl_labels = []\n        assert len(cls_scores) == len(bbox_cls_preds) == len(\n            bbox_reg_preds) == len(mlvl_anchors)\n        for cls_score, bbox_cls_pred, bbox_reg_pred, anchors in zip(\n                cls_scores, bbox_cls_preds, bbox_reg_preds, mlvl_anchors):\n            assert cls_score.size()[-2:] == bbox_cls_pred.size(\n            )[-2:] == bbox_reg_pred.size()[-2::]\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)[:, :-1]\n            bbox_cls_pred = bbox_cls_pred.permute(1, 2, 0).reshape(\n                -1, self.side_num * 4)\n            bbox_reg_pred = bbox_reg_pred.permute(1, 2, 0).reshape(\n                -1, self.side_num * 4)\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is 
no difference in performance for most models. If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(\n                    anchors=anchors,\n                    bbox_cls_pred=bbox_cls_pred,\n                    bbox_reg_pred=bbox_reg_pred))\n            scores, labels, _, filtered_results = results\n\n            anchors = filtered_results['anchors']\n            bbox_cls_pred = filtered_results['bbox_cls_pred']\n            bbox_reg_pred = filtered_results['bbox_reg_pred']\n\n            bbox_preds = [\n                bbox_cls_pred.contiguous(),\n                bbox_reg_pred.contiguous()\n            ]\n            bboxes, confids = self.bbox_coder.decode(\n                anchors.contiguous(), bbox_preds, max_shape=img_shape)\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_confids.append(confids)\n            mlvl_labels.append(labels)\n        return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,\n                                       scale_factor, cfg, rescale, True,\n                                       mlvl_confids)\n"
  },
  {
    "path": "mmdet/models/dense_heads/solo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.core import InstanceData, mask_matrix_nms, multi_apply\nfrom mmdet.core.utils import center_of_mass, generate_coordinate\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.utils.misc import floordiv\nfrom .base_mask_head import BaseMaskHead\n\n\n@HEADS.register_module()\nclass SOLOHead(BaseMaskHead):\n    \"\"\"SOLO mask head used in `SOLO: Segmenting Objects by Locations.\n\n    <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels. Used in child classes.\n            Default: 256.\n        stacked_convs (int): Number of stacking convs of the head.\n            Default: 4.\n        strides (tuple): Downsample factor of each feature map.\n        scale_ranges (tuple[tuple[int, int]]): Area range of multiple\n            level masks, in the format [(min1, max1), (min2, max2), ...].\n            A range of (16, 64) means the area range between (16, 64).\n        pos_scale (float): Constant scale factor to control the center region.\n        num_grids (list[int]): Divided image into a uniform grids, each\n            feature map has a different grid value. The number of output\n            channels is grid ** 2. Default: [40, 36, 24, 16, 12].\n        cls_down_index (int): The index of downsample operation in\n            classification branch. Default: 0.\n        loss_mask (dict): Config of mask loss.\n        loss_cls (dict): Config of classification loss.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: norm_cfg=dict(type='GN', num_groups=32,\n                                   requires_grad=True).\n        train_cfg (dict): Training config of head.\n        test_cfg (dict): Testing config of head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(\n        self,\n        num_classes,\n        in_channels,\n        feat_channels=256,\n        stacked_convs=4,\n        strides=(4, 8, 16, 32, 64),\n        scale_ranges=((8, 32), (16, 64), (32, 128), (64, 256), (128, 512)),\n        pos_scale=0.2,\n        num_grids=[40, 36, 24, 16, 12],\n        cls_down_index=0,\n        loss_mask=None,\n        loss_cls=None,\n        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n        train_cfg=None,\n        test_cfg=None,\n        init_cfg=[\n            dict(type='Normal', layer='Conv2d', std=0.01),\n            dict(\n                type='Normal',\n                std=0.01,\n                bias_prob=0.01,\n                override=dict(name='conv_mask_list')),\n            dict(\n                type='Normal',\n                std=0.01,\n                bias_prob=0.01,\n                override=dict(name='conv_cls'))\n        ],\n    ):\n        super(SOLOHead, self).__init__(init_cfg)\n        self.num_classes = num_classes\n        self.cls_out_channels = self.num_classes\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.num_grids = num_grids\n        # number of FPN feats\n        self.num_levels = 
len(strides)\n        assert self.num_levels == len(scale_ranges) == len(num_grids)\n        self.scale_ranges = scale_ranges\n        self.pos_scale = pos_scale\n\n        self.cls_down_index = cls_down_index\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_mask = build_loss(loss_mask)\n        self.norm_cfg = norm_cfg\n        self.init_cfg = init_cfg\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        self._init_layers()\n\n    def _init_layers(self):\n        self.mask_convs = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.mask_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        self.conv_mask_list = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list.append(\n                nn.Conv2d(self.feat_channels, num_grid**2, 1))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def resize_feats(self, feats):\n        \"\"\"Downsample the first feat and upsample last feat in feats.\"\"\"\n        out = []\n        for i in range(len(feats)):\n            if i == 0:\n                out.append(\n                    F.interpolate(\n                        feats[0],\n                        size=feats[i + 1].shape[-2:],\n                        mode='bilinear',\n                        align_corners=False))\n            elif i == len(feats) - 1:\n                out.append(\n                    F.interpolate(\n                        feats[i],\n                        size=feats[i - 1].shape[-2:],\n                        mode='bilinear',\n                        align_corners=False))\n            else:\n                out.append(feats[i])\n        return out\n\n    def forward(self, feats):\n        assert len(feats) == self.num_levels\n        feats = self.resize_feats(feats)\n        mlvl_mask_preds = []\n        mlvl_cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat = torch.cat([mask_feat, coord_feat], 1)\n\n            for mask_layer in (self.mask_convs):\n                mask_feat = mask_layer(mask_feat)\n\n            mask_feat = F.interpolate(\n                mask_feat, scale_factor=2, mode='bilinear')\n            mask_pred = self.conv_mask_list[i](mask_feat)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = 
cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_pred = F.interpolate(\n                    mask_pred.sigmoid(), size=upsampled_size, mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = local_max[:, :, :-1, :-1] == cls_pred\n                cls_pred = cls_pred * keep_mask\n\n            mlvl_mask_preds.append(mask_pred)\n            mlvl_cls_preds.append(cls_pred)\n        return mlvl_mask_preds, mlvl_cls_preds\n\n    def loss(self,\n             mlvl_mask_preds,\n             mlvl_cls_preds,\n             gt_labels,\n             gt_masks,\n             img_metas,\n             gt_bboxes=None,\n             **kwargs):\n        \"\"\"Calculate the loss of total batch.\n\n        Args:\n            mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.\n                Each element in the list has shape\n                (batch_size, num_grids**2 ,h ,w).\n            mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids ,num_grids).\n            gt_labels (list[Tensor]): Labels of multiple images.\n            gt_masks (list[Tensor]): Ground truth masks of multiple images.\n                Each has shape (num_instances, h, w).\n            img_metas (list[dict]): Meta information of multiple images.\n            gt_bboxes (list[Tensor]): Ground truth bboxes of multiple\n                images. Default: None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_levels = self.num_levels\n        num_imgs = len(gt_labels)\n\n        featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds]\n\n        # `BoolTensor` in `pos_masks` represent\n        # whether the corresponding point is\n        # positive\n        pos_mask_targets, labels, pos_masks = multi_apply(\n            self._get_targets_single,\n            gt_bboxes,\n            gt_labels,\n            gt_masks,\n            featmap_sizes=featmap_sizes)\n\n        # change from the outside list meaning multi images\n        # to the outside list meaning multi levels\n        mlvl_pos_mask_targets = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds = [[] for _ in range(num_levels)]\n        mlvl_pos_masks = [[] for _ in range(num_levels)]\n        mlvl_labels = [[] for _ in range(num_levels)]\n        for img_id in range(num_imgs):\n            assert num_levels == len(pos_mask_targets[img_id])\n            for lvl in range(num_levels):\n                mlvl_pos_mask_targets[lvl].append(\n                    pos_mask_targets[img_id][lvl])\n                mlvl_pos_mask_preds[lvl].append(\n                    mlvl_mask_preds[lvl][img_id, pos_masks[img_id][lvl], ...])\n                mlvl_pos_masks[lvl].append(pos_masks[img_id][lvl].flatten())\n                mlvl_labels[lvl].append(labels[img_id][lvl].flatten())\n\n        # cat multiple image\n        temp_mlvl_cls_preds = []\n        for lvl in range(num_levels):\n            mlvl_pos_mask_targets[lvl] = torch.cat(\n                mlvl_pos_mask_targets[lvl], dim=0)\n            mlvl_pos_mask_preds[lvl] = torch.cat(\n                
mlvl_pos_mask_preds[lvl], dim=0)\n            mlvl_pos_masks[lvl] = torch.cat(mlvl_pos_masks[lvl], dim=0)\n            mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0)\n            temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute(\n                0, 2, 3, 1).reshape(-1, self.cls_out_channels))\n\n        num_pos = sum(item.sum() for item in mlvl_pos_masks)\n        # dice loss\n        loss_mask = []\n        for pred, target in zip(mlvl_pos_mask_preds, mlvl_pos_mask_targets):\n            if pred.size()[0] == 0:\n                loss_mask.append(pred.sum().unsqueeze(0))\n                continue\n            loss_mask.append(\n                self.loss_mask(pred, target, reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n            loss_mask = torch.cat(loss_mask).mean()\n\n        flatten_labels = torch.cat(mlvl_labels)\n        flatten_cls_preds = torch.cat(temp_mlvl_cls_preds)\n        loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    def _get_targets_single(self,\n                            gt_bboxes,\n                            gt_labels,\n                            gt_masks,\n                            featmap_sizes=None):\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_bboxes (Tensor): Ground truth bbox of each instance,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth label of each instance,\n                shape (num_gts,).\n            gt_masks (Tensor): Ground truth mask of each instance,\n                shape (num_gts, h, w).\n            featmap_sizes (list[:obj:`torch.size`]): Size of each\n                feature map from feature pyramid, each element\n                means (feat_h, feat_w). 
Default: None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_pos_masks (list[Tensor]): Each element is\n                  a `BoolTensor` to represent whether the\n                  corresponding point in single level\n                  is positive, has shape (num_grid **2).\n        \"\"\"\n        device = gt_labels.device\n        gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                              (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n\n        mlvl_pos_mask_targets = []\n        mlvl_labels = []\n        mlvl_pos_masks = []\n        for (lower_bound, upper_bound), stride, featmap_size, num_grid \\\n                in zip(self.scale_ranges, self.strides,\n                       featmap_sizes, self.num_grids):\n\n            mask_target = torch.zeros(\n                [num_grid**2, featmap_size[0], featmap_size[1]],\n                dtype=torch.uint8,\n                device=device)\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            labels = torch.zeros([num_grid, num_grid],\n                                 dtype=torch.int64,\n                                 device=device) + self.num_classes\n            pos_mask = torch.zeros([num_grid**2],\n                                   dtype=torch.bool,\n                                   device=device)\n\n            gt_inds = ((gt_areas >= lower_bound) &\n                       (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(gt_inds) == 0:\n                mlvl_pos_mask_targets.append(\n                    mask_target.new_zeros(0, featmap_size[0], featmap_size[1]))\n                mlvl_labels.append(labels)\n                mlvl_pos_masks.append(pos_mask)\n                continue\n            hit_gt_bboxes = gt_bboxes[gt_inds]\n            hit_gt_labels = gt_labels[gt_inds]\n            hit_gt_masks = gt_masks[gt_inds, ...]\n\n            pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] -\n                                  hit_gt_bboxes[:, 0]) * self.pos_scale\n            pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] -\n                                  hit_gt_bboxes[:, 1]) * self.pos_scale\n\n            # Make sure hit_gt_masks has a value\n            valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0\n            output_stride = stride / 2\n\n            for gt_mask, gt_label, pos_h_range, pos_w_range, \\\n                valid_mask_flag in \\\n                    zip(hit_gt_masks, hit_gt_labels, pos_h_ranges,\n                        pos_w_ranges, valid_mask_flags):\n                if not valid_mask_flag:\n                    continue\n                upsampled_size = (featmap_sizes[0][0] * 4,\n                                  featmap_sizes[0][1] * 4)\n                center_h, center_w = center_of_mass(gt_mask)\n\n                coord_w = int(\n                    floordiv((center_w / upsampled_size[1]), (1. / num_grid),\n                             rounding_mode='trunc'))\n                coord_h = int(\n                    floordiv((center_h / upsampled_size[0]), (1. 
/ num_grid),\n                             rounding_mode='trunc'))\n\n                # left, top, right, down\n                top_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_h - pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                down_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_h + pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                left_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_w - pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                right_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_w + pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n\n                top = max(top_box, coord_h - 1)\n                down = min(down_box, coord_h + 1)\n                left = max(coord_w - 1, left_box)\n                right = min(right_box, coord_w + 1)\n\n                labels[top:(down + 1), left:(right + 1)] = gt_label\n                # ins\n                gt_mask = np.uint8(gt_mask.cpu().numpy())\n                # Follow the original implementation, F.interpolate is\n                # different from cv2 and opencv\n                gt_mask = mmcv.imrescale(gt_mask, scale=1. / output_stride)\n                gt_mask = torch.from_numpy(gt_mask).to(device=device)\n\n                for i in range(top, down + 1):\n                    for j in range(left, right + 1):\n                        index = int(i * num_grid + j)\n                        mask_target[index, :gt_mask.shape[0], :gt_mask.\n                                    shape[1]] = gt_mask\n                        pos_mask[index] = True\n            mlvl_pos_mask_targets.append(mask_target[pos_mask])\n            mlvl_labels.append(labels)\n            mlvl_pos_masks.append(pos_mask)\n        return mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks\n\n    def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas,\n                    **kwargs):\n        \"\"\"Get multi-image mask results.\n\n        Args:\n            mlvl_mask_preds (list[Tensor]): Multi-level mask prediction.\n                Each element in the list has shape\n                (batch_size, num_grids**2 ,h ,w).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. 
Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids ,num_grids).\n            img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        mlvl_cls_scores = [\n            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores\n        ]\n        assert len(mlvl_mask_preds) == len(mlvl_cls_scores)\n        num_levels = len(mlvl_cls_scores)\n\n        results_list = []\n        for img_id in range(len(img_metas)):\n            cls_pred_list = [\n                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)\n                for lvl in range(num_levels)\n            ]\n            mask_pred_list = [\n                mlvl_mask_preds[lvl][img_id] for lvl in range(num_levels)\n            ]\n\n            cls_pred_list = torch.cat(cls_pred_list, dim=0)\n            mask_pred_list = torch.cat(mask_pred_list, dim=0)\n\n            results = self._get_results_single(\n                cls_pred_list, mask_pred_list, img_meta=img_metas[img_id])\n            results_list.append(results)\n\n        return results_list\n\n    def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=None):\n        \"\"\"Get processed mask related results of single image.\n\n        Args:\n            cls_scores (Tensor): Classification score of all points\n                in single image, has shape (num_points, num_classes).\n            mask_preds (Tensor): Mask prediction of all points in\n                single image, has shape (num_points, feat_h, feat_w).\n            img_meta (dict): Meta information of corresponding image.\n            cfg (dict, optional): Config used in test phase.\n                Default: None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(results, cls_scores):\n            \"\"\"Generate a empty results.\"\"\"\n            results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2])\n            results.labels = cls_scores.new_ones(0)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_scores) == len(mask_preds)\n        results = InstanceData(img_meta)\n\n        featmap_size = mask_preds.size()[-2:]\n\n        img_shape = results.img_shape\n        ori_shape = results.ori_shape\n\n        h, w, _ = img_shape\n        upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4)\n\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        if len(cls_scores) == 0:\n            return empty_results(results, cls_scores)\n\n        inds = score_mask.nonzero()\n        cls_labels = 
inds[:, 1]\n\n        # Filter the mask mask with an area is smaller than\n        # stride of corresponding feature level\n        lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0)\n        strides = cls_scores.new_ones(lvl_interval[-1])\n        strides[:lvl_interval[0]] *= self.strides[0]\n        for lvl in range(1, self.num_levels):\n            strides[lvl_interval[lvl -\n                                 1]:lvl_interval[lvl]] *= self.strides[lvl]\n        strides = strides[inds[:, 0]]\n        mask_preds = mask_preds[inds[:, 0]]\n\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(results, cls_scores)\n        masks = masks[keep]\n        mask_preds = mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0), size=upsampled_size,\n            mode='bilinear')[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results.masks = masks\n        results.labels = labels\n        results.scores = scores\n\n        return results\n\n\n@HEADS.register_module()\nclass DecoupledSOLOHead(SOLOHead):\n    \"\"\"Decoupled SOLO mask head used in `SOLO: Segmenting Objects by Locations.\n\n    <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 init_cfg=[\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_x')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_y')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs):\n        super(DecoupledSOLOHead, self).__init__(\n            *args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        self.mask_convs_x = nn.ModuleList()\n        self.mask_convs_y = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n\n        for i in range(self.stacked_convs):\n            chn = self.in_channels + 1 if i == 0 else self.feat_channels\n            self.mask_convs_x.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n  
                  stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n            self.mask_convs_y.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_mask_list_x = nn.ModuleList()\n        self.conv_mask_list_y = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list_x.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n            self.conv_mask_list_y.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, feats):\n        assert len(feats) == self.num_levels\n        feats = self.resize_feats(feats)\n        mask_preds_x = []\n        mask_preds_y = []\n        cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat_x = torch.cat([mask_feat, coord_feat[:, 0:1, ...]], 1)\n            mask_feat_y = torch.cat([mask_feat, coord_feat[:, 1:2, ...]], 1)\n\n            for mask_layer_x, mask_layer_y in \\\n                    zip(self.mask_convs_x, self.mask_convs_y):\n                mask_feat_x = mask_layer_x(mask_feat_x)\n                mask_feat_y = mask_layer_y(mask_feat_y)\n\n            mask_feat_x = F.interpolate(\n                mask_feat_x, scale_factor=2, mode='bilinear')\n            mask_feat_y = F.interpolate(\n                mask_feat_y, scale_factor=2, mode='bilinear')\n\n            mask_pred_x = self.conv_mask_list_x[i](mask_feat_x)\n            mask_pred_y = self.conv_mask_list_y[i](mask_feat_y)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_pred_x = F.interpolate(\n                    mask_pred_x.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                mask_pred_y = F.interpolate(\n                    mask_pred_y.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = local_max[:, :, :-1, :-1] == cls_pred\n                
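# keep only grid cells whose score is a local maximum (point-wise NMS)\n                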
cls_pred = cls_pred * keep_mask\n\n            mask_preds_x.append(mask_pred_x)\n            mask_preds_y.append(mask_pred_y)\n            cls_preds.append(cls_pred)\n        return mask_preds_x, mask_preds_y, cls_preds\n\n    def loss(self,\n             mlvl_mask_preds_x,\n             mlvl_mask_preds_y,\n             mlvl_cls_preds,\n             gt_labels,\n             gt_masks,\n             img_metas,\n             gt_bboxes=None,\n             **kwargs):\n        \"\"\"Calculate the loss of total batch.\n\n        Args:\n            mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                from x branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                from y branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids ,num_grids).\n            gt_labels (list[Tensor]): Labels of multiple images.\n            gt_masks (list[Tensor]): Ground truth masks of multiple images.\n                Each has shape (num_instances, h, w).\n            img_metas (list[dict]): Meta information of multiple images.\n            gt_bboxes (list[Tensor]): Ground truth bboxes of multiple\n                images. Default: None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_levels = self.num_levels\n        num_imgs = len(gt_labels)\n        featmap_sizes = [featmap.size()[-2:] for featmap in mlvl_mask_preds_x]\n\n        pos_mask_targets, labels, \\\n            xy_pos_indexes = \\\n            multi_apply(self._get_targets_single,\n                        gt_bboxes,\n                        gt_labels,\n                        gt_masks,\n                        featmap_sizes=featmap_sizes)\n\n        # change from the outside list meaning multi images\n        # to the outside list meaning multi levels\n        mlvl_pos_mask_targets = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds_x = [[] for _ in range(num_levels)]\n        mlvl_pos_mask_preds_y = [[] for _ in range(num_levels)]\n        mlvl_labels = [[] for _ in range(num_levels)]\n        for img_id in range(num_imgs):\n\n            for lvl in range(num_levels):\n                mlvl_pos_mask_targets[lvl].append(\n                    pos_mask_targets[img_id][lvl])\n                mlvl_pos_mask_preds_x[lvl].append(\n                    mlvl_mask_preds_x[lvl][img_id,\n                                           xy_pos_indexes[img_id][lvl][:, 1]])\n                mlvl_pos_mask_preds_y[lvl].append(\n                    mlvl_mask_preds_y[lvl][img_id,\n                                           xy_pos_indexes[img_id][lvl][:, 0]])\n                mlvl_labels[lvl].append(labels[img_id][lvl].flatten())\n\n        # cat multiple images\n        temp_mlvl_cls_preds = []\n        for lvl in range(num_levels):\n            mlvl_pos_mask_targets[lvl] = torch.cat(\n                mlvl_pos_mask_targets[lvl], dim=0)\n            mlvl_pos_mask_preds_x[lvl] = torch.cat(\n                mlvl_pos_mask_preds_x[lvl], dim=0)\n            mlvl_pos_mask_preds_y[lvl] = torch.cat(\n                mlvl_pos_mask_preds_y[lvl], dim=0)\n            mlvl_labels[lvl] = torch.cat(mlvl_labels[lvl], dim=0)\n            
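# flatten this level's class scores to (batch_size * num_grid**2, num_classes)\n            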
temp_mlvl_cls_preds.append(mlvl_cls_preds[lvl].permute(\n                0, 2, 3, 1).reshape(-1, self.cls_out_channels))\n\n        num_pos = 0.\n        # dice loss\n        loss_mask = []\n        for pred_x, pred_y, target in \\\n                zip(mlvl_pos_mask_preds_x,\n                    mlvl_pos_mask_preds_y, mlvl_pos_mask_targets):\n            num_masks = pred_x.size(0)\n            if num_masks == 0:\n                # make sure can get grad\n                loss_mask.append((pred_x.sum() + pred_y.sum()).unsqueeze(0))\n                continue\n            num_pos += num_masks\n            pred_mask = pred_y.sigmoid() * pred_x.sigmoid()\n            loss_mask.append(\n                self.loss_mask(pred_mask, target, reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n            loss_mask = torch.cat(loss_mask).mean()\n\n        # cate\n        flatten_labels = torch.cat(mlvl_labels)\n        flatten_cls_preds = torch.cat(temp_mlvl_cls_preds)\n\n        loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    def _get_targets_single(self,\n                            gt_bboxes,\n                            gt_labels,\n                            gt_masks,\n                            featmap_sizes=None):\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_bboxes (Tensor): Ground truth bbox of each instance,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth label of each instance,\n                shape (num_gts,).\n            gt_masks (Tensor): Ground truth mask of each instance,\n                shape (num_gts, h, w).\n            featmap_sizes (list[:obj:`torch.size`]): Size of each\n                feature map from feature pyramid, each element\n                means (feat_h, feat_w). 
Default: None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_xy_pos_indexes (list[Tensor]): Each element\n                  in the list contains the index of positive samples in\n                  corresponding level, has shape (num_pos, 2), last\n                  dimension 2 present (index_x, index_y).\n        \"\"\"\n        mlvl_pos_mask_targets, mlvl_labels, \\\n            mlvl_pos_masks = \\\n            super()._get_targets_single(gt_bboxes, gt_labels, gt_masks,\n                                        featmap_sizes=featmap_sizes)\n\n        mlvl_xy_pos_indexes = [(item - self.num_classes).nonzero()\n                               for item in mlvl_labels]\n\n        return mlvl_pos_mask_targets, mlvl_labels, mlvl_xy_pos_indexes\n\n    def get_results(self,\n                    mlvl_mask_preds_x,\n                    mlvl_mask_preds_y,\n                    mlvl_cls_scores,\n                    img_metas,\n                    rescale=None,\n                    **kwargs):\n        \"\"\"Get multi-image mask results.\n\n        Args:\n            mlvl_mask_preds_x (list[Tensor]): Multi-level mask prediction\n                from x branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_mask_preds_y (list[Tensor]): Multi-level mask prediction\n                from y branch. Each element in the list has shape\n                (batch_size, num_grids ,h ,w).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. 
Each element\n                in the list has shape\n                (batch_size, num_classes ,num_grids ,num_grids).\n            img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        mlvl_cls_scores = [\n            item.permute(0, 2, 3, 1) for item in mlvl_cls_scores\n        ]\n        assert len(mlvl_mask_preds_x) == len(mlvl_cls_scores)\n        num_levels = len(mlvl_cls_scores)\n\n        results_list = []\n        for img_id in range(len(img_metas)):\n            cls_pred_list = [\n                mlvl_cls_scores[i][img_id].view(\n                    -1, self.cls_out_channels).detach()\n                for i in range(num_levels)\n            ]\n            mask_pred_list_x = [\n                mlvl_mask_preds_x[i][img_id] for i in range(num_levels)\n            ]\n            mask_pred_list_y = [\n                mlvl_mask_preds_y[i][img_id] for i in range(num_levels)\n            ]\n\n            cls_pred_list = torch.cat(cls_pred_list, dim=0)\n            mask_pred_list_x = torch.cat(mask_pred_list_x, dim=0)\n            mask_pred_list_y = torch.cat(mask_pred_list_y, dim=0)\n\n            results = self._get_results_single(\n                cls_pred_list,\n                mask_pred_list_x,\n                mask_pred_list_y,\n                img_meta=img_metas[img_id],\n                cfg=self.test_cfg)\n            results_list.append(results)\n        return results_list\n\n    def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y,\n                            img_meta, cfg):\n        \"\"\"Get processed mask related results of single image.\n\n        Args:\n            cls_scores (Tensor): Classification score of all points\n                in single image, has shape (num_points, num_classes).\n            mask_preds_x (Tensor): Mask prediction of x branch of\n                all points in single image, has shape\n                (sum_num_grids, feat_h, feat_w).\n            mask_preds_y (Tensor): Mask prediction of y branch of\n                all points in single image, has shape\n                (sum_num_grids, feat_h, feat_w).\n            img_meta (dict): Meta information of corresponding image.\n            cfg (dict): Config used in test phase.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             it usually contains following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(results, cls_scores):\n            \"\"\"Generate a empty results.\"\"\"\n            results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2])\n            results.labels = cls_scores.new_ones(0)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n\n        results = 
InstanceData(img_meta)\n        img_shape = results.img_shape\n        ori_shape = results.ori_shape\n        h, w, _ = img_shape\n        featmap_size = mask_preds_x.size()[-2:]\n        upsampled_size = (featmap_size[0] * 4, featmap_size[1] * 4)\n\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        inds = score_mask.nonzero()\n        lvl_interval = inds.new_tensor(self.num_grids).pow(2).cumsum(0)\n        num_all_points = lvl_interval[-1]\n        lvl_start_index = inds.new_ones(num_all_points)\n        num_grids = inds.new_ones(num_all_points)\n        seg_size = inds.new_tensor(self.num_grids).cumsum(0)\n        mask_lvl_start_index = inds.new_ones(num_all_points)\n        strides = inds.new_ones(num_all_points)\n\n        lvl_start_index[:lvl_interval[0]] *= 0\n        mask_lvl_start_index[:lvl_interval[0]] *= 0\n        num_grids[:lvl_interval[0]] *= self.num_grids[0]\n        strides[:lvl_interval[0]] *= self.strides[0]\n\n        for lvl in range(1, self.num_levels):\n            lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                lvl_interval[lvl - 1]\n            mask_lvl_start_index[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                seg_size[lvl - 1]\n            num_grids[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                self.num_grids[lvl]\n            strides[lvl_interval[lvl - 1]:lvl_interval[lvl]] *= \\\n                self.strides[lvl]\n\n        lvl_start_index = lvl_start_index[inds[:, 0]]\n        mask_lvl_start_index = mask_lvl_start_index[inds[:, 0]]\n        num_grids = num_grids[inds[:, 0]]\n        strides = strides[inds[:, 0]]\n\n        y_lvl_offset = (inds[:, 0] - lvl_start_index) // num_grids\n        x_lvl_offset = (inds[:, 0] - lvl_start_index) % num_grids\n        y_inds = mask_lvl_start_index + y_lvl_offset\n        x_inds = mask_lvl_start_index + x_lvl_offset\n\n        cls_labels = inds[:, 1]\n        mask_preds = mask_preds_x[x_inds, ...] 
* mask_preds_y[y_inds, ...]\n\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(results, cls_scores)\n\n        masks = masks[keep]\n        mask_preds = mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0), size=upsampled_size,\n            mode='bilinear')[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds, size=ori_shape[:2], mode='bilinear').squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results.masks = masks\n        results.labels = labels\n        results.scores = scores\n\n        return results\n\n\n@HEADS.register_module()\nclass DecoupledSOLOLightHead(DecoupledSOLOHead):\n    \"\"\"Decoupled Light SOLO mask head used in `SOLO: Segmenting Objects by\n    Locations <https://arxiv.org/abs/1912.04488>`_\n\n    Args:\n        with_dcn (bool): Whether use dcn in mask_convs and cls_convs,\n            default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 dcn_cfg=None,\n                 init_cfg=[\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_x')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_mask_list_y')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs):\n        assert dcn_cfg is None or isinstance(dcn_cfg, dict)\n        self.dcn_cfg = dcn_cfg\n        super(DecoupledSOLOLightHead, self).__init__(\n            *args, init_cfg=init_cfg, **kwargs)\n\n    def _init_layers(self):\n        self.mask_convs = nn.ModuleList()\n        self.cls_convs = nn.ModuleList()\n\n        for i in range(self.stacked_convs):\n            if self.dcn_cfg is not None\\\n                    and i == self.stacked_convs - 1:\n                conv_cfg = self.dcn_cfg\n            else:\n                conv_cfg = None\n\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.mask_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    
norm_cfg=self.norm_cfg))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.conv_mask_list_x = nn.ModuleList()\n        self.conv_mask_list_y = nn.ModuleList()\n        for num_grid in self.num_grids:\n            self.conv_mask_list_x.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n            self.conv_mask_list_y.append(\n                nn.Conv2d(self.feat_channels, num_grid, 3, padding=1))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, feats):\n        assert len(feats) == self.num_levels\n        feats = self.resize_feats(feats)\n        mask_preds_x = []\n        mask_preds_y = []\n        cls_preds = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            mask_feat = x\n            cls_feat = x\n            # generate and concat the coordinate\n            coord_feat = generate_coordinate(mask_feat.size(),\n                                             mask_feat.device)\n            mask_feat = torch.cat([mask_feat, coord_feat], 1)\n\n            for mask_layer in self.mask_convs:\n                mask_feat = mask_layer(mask_feat)\n\n            mask_feat = F.interpolate(\n                mask_feat, scale_factor=2, mode='bilinear')\n\n            mask_pred_x = self.conv_mask_list_x[i](mask_feat)\n            mask_pred_y = self.conv_mask_list_y[i](mask_feat)\n\n            # cls branch\n            for j, cls_layer in enumerate(self.cls_convs):\n                if j == self.cls_down_index:\n                    num_grid = self.num_grids[i]\n                    cls_feat = F.interpolate(\n                        cls_feat, size=num_grid, mode='bilinear')\n                cls_feat = cls_layer(cls_feat)\n\n            cls_pred = self.conv_cls(cls_feat)\n\n            if not self.training:\n                feat_wh = feats[0].size()[-2:]\n                upsampled_size = (feat_wh[0] * 2, feat_wh[1] * 2)\n                mask_pred_x = F.interpolate(\n                    mask_pred_x.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                mask_pred_y = F.interpolate(\n                    mask_pred_y.sigmoid(),\n                    size=upsampled_size,\n                    mode='bilinear')\n                cls_pred = cls_pred.sigmoid()\n                # get local maximum\n                local_max = F.max_pool2d(cls_pred, 2, stride=1, padding=1)\n                keep_mask = local_max[:, :, :-1, :-1] == cls_pred\n                cls_pred = cls_pred * keep_mask\n\n            mask_preds_x.append(mask_pred_x)\n            mask_preds_y.append(mask_pred_y)\n            cls_preds.append(cls_pred)\n        return mask_preds_x, mask_preds_y, cls_preds\n"
  },
  {
    "path": "mmdet/models/dense_heads/solov2_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, auto_fp16, force_fp32\n\nfrom mmdet.core import InstanceData, mask_matrix_nms, multi_apply\nfrom mmdet.core.utils import center_of_mass, generate_coordinate\nfrom mmdet.models.builder import HEADS\nfrom mmdet.utils.misc import floordiv\nfrom .solo_head import SOLOHead\n\n\nclass MaskFeatModule(BaseModule):\n    \"\"\"SOLOv2 mask feature map branch used in `SOLOv2: Dynamic and Fast\n    Instance Segmentation. <https://arxiv.org/pdf/2003.10152>`_\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels of the mask feature\n             map branch.\n        start_level (int): The starting feature map level from RPN that\n             will be used to predict the mask feature map.\n        end_level (int): The ending feature map level from rpn that\n             will be used to predict the mask feature map.\n        out_channels (int): Number of output channels of the mask feature\n             map branch. This is the channel count of the mask\n             feature map that to be dynamically convolved with the predicted\n             kernel.\n        mask_stride (int): Downsample factor of the mask feature map output.\n            Default: 4.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 feat_channels,\n                 start_level,\n                 end_level,\n                 out_channels,\n                 mask_stride=4,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=[dict(type='Normal', layer='Conv2d', std=0.01)]):\n        super().__init__(init_cfg=init_cfg)\n\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.start_level = start_level\n        self.end_level = end_level\n        self.mask_stride = mask_stride\n        assert start_level >= 0 and end_level >= start_level\n        self.out_channels = out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self._init_layers()\n        self.fp16_enabled = False\n\n    def _init_layers(self):\n        self.convs_all_levels = nn.ModuleList()\n        for i in range(self.start_level, self.end_level + 1):\n            convs_per_level = nn.Sequential()\n            if i == 0:\n                convs_per_level.add_module(\n                    f'conv{i}',\n                    ConvModule(\n                        self.in_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        inplace=False))\n                self.convs_all_levels.append(convs_per_level)\n                continue\n\n            for j in range(i):\n                if j == 0:\n                    if i == self.end_level:\n                        chn = self.in_channels + 2\n                    else:\n                        chn = self.in_channels\n                    
convs_per_level.add_module(\n                        f'conv{j}',\n                        ConvModule(\n                            chn,\n                            self.feat_channels,\n                            3,\n                            padding=1,\n                            conv_cfg=self.conv_cfg,\n                            norm_cfg=self.norm_cfg,\n                            inplace=False))\n                    convs_per_level.add_module(\n                        f'upsample{j}',\n                        nn.Upsample(\n                            scale_factor=2,\n                            mode='bilinear',\n                            align_corners=False))\n                    continue\n\n                convs_per_level.add_module(\n                    f'conv{j}',\n                    ConvModule(\n                        self.feat_channels,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        inplace=False))\n                convs_per_level.add_module(\n                    f'upsample{j}',\n                    nn.Upsample(\n                        scale_factor=2, mode='bilinear', align_corners=False))\n\n            self.convs_all_levels.append(convs_per_level)\n\n        self.conv_pred = ConvModule(\n            self.feat_channels,\n            self.out_channels,\n            1,\n            padding=0,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg)\n\n    @auto_fp16()\n    def forward(self, feats):\n        inputs = feats[self.start_level:self.end_level + 1]\n        assert len(inputs) == (self.end_level - self.start_level + 1)\n        feature_add_all_level = self.convs_all_levels[0](inputs[0])\n        for i in range(1, len(inputs)):\n            input_p = inputs[i]\n            if i == len(inputs) - 1:\n                coord_feat = generate_coordinate(input_p.size(),\n                                                 input_p.device)\n                input_p = torch.cat([input_p, coord_feat], 1)\n\n            # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n            feature_add_all_level = feature_add_all_level + \\\n                self.convs_all_levels[i](input_p)\n\n        feature_pred = self.conv_pred(feature_add_all_level)\n        return feature_pred\n\n\n@HEADS.register_module()\nclass SOLOV2Head(SOLOHead):\n    \"\"\"SOLOv2 mask head used in `SOLOv2: Dynamic and Fast Instance\n    Segmentation. <https://arxiv.org/pdf/2003.10152>`_\n\n    Args:\n        mask_feature_head (dict): Config of SOLOv2MaskFeatHead.\n        dynamic_conv_size (int): Dynamic Conv kernel size. Default: 1.\n        dcn_cfg (dict): Dcn conv configurations in kernel_convs and cls_conv.\n            default: None.\n        dcn_apply_to_all_conv (bool): Whether to use dcn in every layer of\n            kernel_convs and cls_convs, or only the last layer. It shall be set\n            `True` for the normal version of SOLOv2 and `False` for the\n            light-weight version. 
default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 mask_feature_head,\n                 dynamic_conv_size=1,\n                 dcn_cfg=None,\n                 dcn_apply_to_all_conv=True,\n                 init_cfg=[\n                     dict(type='Normal', layer='Conv2d', std=0.01),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         bias_prob=0.01,\n                         override=dict(name='conv_cls'))\n                 ],\n                 **kwargs):\n        assert dcn_cfg is None or isinstance(dcn_cfg, dict)\n        self.dcn_cfg = dcn_cfg\n        self.with_dcn = dcn_cfg is not None\n        self.dcn_apply_to_all_conv = dcn_apply_to_all_conv\n        self.dynamic_conv_size = dynamic_conv_size\n        mask_out_channels = mask_feature_head.get('out_channels')\n        self.kernel_out_channels = \\\n            mask_out_channels * self.dynamic_conv_size * self.dynamic_conv_size\n\n        super().__init__(*args, init_cfg=init_cfg, **kwargs)\n\n        # update the in_channels of mask_feature_head\n        if mask_feature_head.get('in_channels', None) is not None:\n            if mask_feature_head.in_channels != self.in_channels:\n                warnings.warn('The `in_channels` of SOLOv2MaskFeatHead and '\n                              'SOLOv2Head should be same, changing '\n                              'mask_feature_head.in_channels to '\n                              f'{self.in_channels}')\n                mask_feature_head.update(in_channels=self.in_channels)\n        else:\n            mask_feature_head.update(in_channels=self.in_channels)\n\n        self.mask_feature_head = MaskFeatModule(**mask_feature_head)\n        self.mask_stride = self.mask_feature_head.mask_stride\n        self.fp16_enabled = False\n\n    def _init_layers(self):\n        self.cls_convs = nn.ModuleList()\n        self.kernel_convs = nn.ModuleList()\n        conv_cfg = None\n        for i in range(self.stacked_convs):\n            if self.with_dcn:\n                if self.dcn_apply_to_all_conv:\n                    conv_cfg = self.dcn_cfg\n                elif i == self.stacked_convs - 1:\n                    # light head\n                    conv_cfg = self.dcn_cfg\n\n            chn = self.in_channels + 2 if i == 0 else self.feat_channels\n            self.kernel_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.cls_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.norm_cfg is None))\n\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n        self.conv_kernel = nn.Conv2d(\n            self.feat_channels, self.kernel_out_channels, 3, padding=1)\n\n    @auto_fp16()\n    def forward(self, feats):\n        assert len(feats) == 
self.num_levels\n        mask_feats = self.mask_feature_head(feats)\n        feats = self.resize_feats(feats)\n        mlvl_kernel_preds = []\n        mlvl_cls_preds = []\n        for i in range(self.num_levels):\n            ins_kernel_feat = feats[i]\n            # ins branch\n            # concat coord\n            coord_feat = generate_coordinate(ins_kernel_feat.size(),\n                                             ins_kernel_feat.device)\n            ins_kernel_feat = torch.cat([ins_kernel_feat, coord_feat], 1)\n\n            # kernel branch\n            kernel_feat = ins_kernel_feat\n            kernel_feat = F.interpolate(\n                kernel_feat,\n                size=self.num_grids[i],\n                mode='bilinear',\n                align_corners=False)\n\n            cate_feat = kernel_feat[:, :-2, :, :]\n\n            kernel_feat = kernel_feat.contiguous()\n            for kernel_conv in self.kernel_convs:\n                kernel_feat = kernel_conv(kernel_feat)\n            kernel_pred = self.conv_kernel(kernel_feat)\n\n            # cate branch\n            cate_feat = cate_feat.contiguous()\n            for cls_conv in self.cls_convs:\n                cate_feat = cls_conv(cate_feat)\n            cate_pred = self.conv_cls(cate_feat)\n\n            mlvl_kernel_preds.append(kernel_pred)\n            mlvl_cls_preds.append(cate_pred)\n\n        return mlvl_kernel_preds, mlvl_cls_preds, mask_feats\n\n    def _get_targets_single(self,\n                            gt_bboxes,\n                            gt_labels,\n                            gt_masks,\n                            featmap_size=None):\n        \"\"\"Compute targets for predictions of single image.\n\n        Args:\n            gt_bboxes (Tensor): Ground truth bbox of each instance,\n                shape (num_gts, 4).\n            gt_labels (Tensor): Ground truth label of each instance,\n                shape (num_gts,).\n            gt_masks (Tensor): Ground truth mask of each instance,\n                shape (num_gts, h, w).\n            featmap_size (:obj:`torch.Size`): Size of unified mask\n                feature map used to generate instance segmentation\n                masks by dynamic convolution, given as\n                (feat_h, feat_w). 
Default: None.\n\n        Returns:\n            Tuple: Usually returns a tuple containing targets for predictions.\n\n                - mlvl_pos_mask_targets (list[Tensor]): Each element represent\n                  the binary mask targets for positive points in this\n                  level, has shape (num_pos, out_h, out_w).\n                - mlvl_labels (list[Tensor]): Each element is\n                  classification labels for all\n                  points in this level, has shape\n                  (num_grid, num_grid).\n                - mlvl_pos_masks  (list[Tensor]): Each element is\n                  a `BoolTensor` to represent whether the\n                  corresponding point in single level\n                  is positive, has shape (num_grid **2).\n                - mlvl_pos_indexes  (list[list]): Each element\n                  in the list contains the positive index in\n                  corresponding level, has shape (num_pos).\n        \"\"\"\n\n        device = gt_labels.device\n        gt_areas = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) *\n                              (gt_bboxes[:, 3] - gt_bboxes[:, 1]))\n\n        mlvl_pos_mask_targets = []\n        mlvl_pos_indexes = []\n        mlvl_labels = []\n        mlvl_pos_masks = []\n        for (lower_bound, upper_bound), num_grid \\\n                in zip(self.scale_ranges, self.num_grids):\n            mask_target = []\n            # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n            pos_index = []\n            labels = torch.zeros([num_grid, num_grid],\n                                 dtype=torch.int64,\n                                 device=device) + self.num_classes\n            pos_mask = torch.zeros([num_grid**2],\n                                   dtype=torch.bool,\n                                   device=device)\n\n            gt_inds = ((gt_areas >= lower_bound) &\n                       (gt_areas <= upper_bound)).nonzero().flatten()\n            if len(gt_inds) == 0:\n                mlvl_pos_mask_targets.append(\n                    torch.zeros([0, featmap_size[0], featmap_size[1]],\n                                dtype=torch.uint8,\n                                device=device))\n                mlvl_labels.append(labels)\n                mlvl_pos_masks.append(pos_mask)\n                mlvl_pos_indexes.append([])\n                continue\n            hit_gt_bboxes = gt_bboxes[gt_inds]\n            hit_gt_labels = gt_labels[gt_inds]\n            hit_gt_masks = gt_masks[gt_inds, ...]\n\n            pos_w_ranges = 0.5 * (hit_gt_bboxes[:, 2] -\n                                  hit_gt_bboxes[:, 0]) * self.pos_scale\n            pos_h_ranges = 0.5 * (hit_gt_bboxes[:, 3] -\n                                  hit_gt_bboxes[:, 1]) * self.pos_scale\n\n            # Make sure hit_gt_masks has a value\n            valid_mask_flags = hit_gt_masks.sum(dim=-1).sum(dim=-1) > 0\n\n            for gt_mask, gt_label, pos_h_range, pos_w_range, \\\n                valid_mask_flag in \\\n                    zip(hit_gt_masks, hit_gt_labels, pos_h_ranges,\n                        pos_w_ranges, valid_mask_flags):\n                if not valid_mask_flag:\n                    continue\n                upsampled_size = (featmap_size[0] * self.mask_stride,\n                                  featmap_size[1] * self.mask_stride)\n                center_h, center_w = center_of_mass(gt_mask)\n\n                coord_w = int(\n                    floordiv((center_w / upsampled_size[1]), (1. 
/ num_grid),\n                             rounding_mode='trunc'))\n                coord_h = int(\n                    floordiv((center_h / upsampled_size[0]), (1. / num_grid),\n                             rounding_mode='trunc'))\n\n                # left, top, right, down\n                top_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_h - pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                down_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_h + pos_h_range) / upsampled_size[0],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                left_box = max(\n                    0,\n                    int(\n                        floordiv(\n                            (center_w - pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n                right_box = min(\n                    num_grid - 1,\n                    int(\n                        floordiv(\n                            (center_w + pos_w_range) / upsampled_size[1],\n                            (1. / num_grid),\n                            rounding_mode='trunc')))\n\n                top = max(top_box, coord_h - 1)\n                down = min(down_box, coord_h + 1)\n                left = max(coord_w - 1, left_box)\n                right = min(right_box, coord_w + 1)\n\n                labels[top:(down + 1), left:(right + 1)] = gt_label\n                # ins\n                gt_mask = np.uint8(gt_mask.cpu().numpy())\n                # Follow the original implementation, F.interpolate is\n                # different from cv2 and opencv\n                gt_mask = mmcv.imrescale(gt_mask, scale=1. 
/ self.mask_stride)\n                gt_mask = torch.from_numpy(gt_mask).to(device=device)\n\n                for i in range(top, down + 1):\n                    for j in range(left, right + 1):\n                        index = int(i * num_grid + j)\n                        this_mask_target = torch.zeros(\n                            [featmap_size[0], featmap_size[1]],\n                            dtype=torch.uint8,\n                            device=device)\n                        this_mask_target[:gt_mask.shape[0], :gt_mask.\n                                         shape[1]] = gt_mask\n                        mask_target.append(this_mask_target)\n                        pos_mask[index] = True\n                        pos_index.append(index)\n            if len(mask_target) == 0:\n                mask_target = torch.zeros(\n                    [0, featmap_size[0], featmap_size[1]],\n                    dtype=torch.uint8,\n                    device=device)\n            else:\n                mask_target = torch.stack(mask_target, 0)\n            mlvl_pos_mask_targets.append(mask_target)\n            mlvl_labels.append(labels)\n            mlvl_pos_masks.append(pos_mask)\n            mlvl_pos_indexes.append(pos_index)\n        return (mlvl_pos_mask_targets, mlvl_labels, mlvl_pos_masks,\n                mlvl_pos_indexes)\n\n    @force_fp32(apply_to=('mlvl_kernel_preds', 'mlvl_cls_preds', 'mask_feats'))\n    def loss(self,\n             mlvl_kernel_preds,\n             mlvl_cls_preds,\n             mask_feats,\n             gt_labels,\n             gt_masks,\n             img_metas,\n             gt_bboxes=None,\n             **kwargs):\n        \"\"\"Calculate the loss of total batch.\n\n        Args:\n            mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel\n                prediction. The kernel is used to generate instance\n                segmentation masks by dynamic convolution. Each element in the\n                list has shape\n                (batch_size, kernel_out_channels, num_grids, num_grids).\n            mlvl_cls_preds (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids, num_grids).\n            mask_feats (Tensor): Unified mask feature map used to generate\n                instance segmentation masks by dynamic convolution. Has shape\n                (batch_size, mask_out_channels, h, w).\n            gt_labels (list[Tensor]): Labels of multiple images.\n            gt_masks (list[Tensor]): Ground truth masks of multiple images.\n                Each has shape (num_instances, h, w).\n            img_metas (list[dict]): Meta information of multiple images.\n            gt_bboxes (list[Tensor]): Ground truth bboxes of multiple\n                images. 
Default: None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_size = mask_feats.size()[-2:]\n\n        pos_mask_targets, labels, pos_masks, pos_indexes = multi_apply(\n            self._get_targets_single,\n            gt_bboxes,\n            gt_labels,\n            gt_masks,\n            featmap_size=featmap_size)\n\n        mlvl_mask_targets = [\n            torch.cat(lvl_mask_targets, 0)\n            for lvl_mask_targets in zip(*pos_mask_targets)\n        ]\n\n        mlvl_pos_kernel_preds = []\n        for lvl_kernel_preds, lvl_pos_indexes in zip(mlvl_kernel_preds,\n                                                     zip(*pos_indexes)):\n            lvl_pos_kernel_preds = []\n            for img_lvl_kernel_preds, img_lvl_pos_indexes in zip(\n                    lvl_kernel_preds, lvl_pos_indexes):\n                img_lvl_pos_kernel_preds = img_lvl_kernel_preds.view(\n                    img_lvl_kernel_preds.shape[0], -1)[:, img_lvl_pos_indexes]\n                lvl_pos_kernel_preds.append(img_lvl_pos_kernel_preds)\n            mlvl_pos_kernel_preds.append(lvl_pos_kernel_preds)\n\n        # make multilevel mlvl_mask_pred\n        mlvl_mask_preds = []\n        for lvl_pos_kernel_preds in mlvl_pos_kernel_preds:\n            lvl_mask_preds = []\n            for img_id, img_lvl_pos_kernel_pred in enumerate(\n                    lvl_pos_kernel_preds):\n                if img_lvl_pos_kernel_pred.size()[-1] == 0:\n                    continue\n                img_mask_feats = mask_feats[[img_id]]\n                h, w = img_mask_feats.shape[-2:]\n                num_kernel = img_lvl_pos_kernel_pred.shape[1]\n                img_lvl_mask_pred = F.conv2d(\n                    img_mask_feats,\n                    img_lvl_pos_kernel_pred.permute(1, 0).view(\n                        num_kernel, -1, self.dynamic_conv_size,\n                        self.dynamic_conv_size),\n                    stride=1).view(-1, h, w)\n                lvl_mask_preds.append(img_lvl_mask_pred)\n            if len(lvl_mask_preds) == 0:\n                lvl_mask_preds = None\n            else:\n                lvl_mask_preds = torch.cat(lvl_mask_preds, 0)\n            mlvl_mask_preds.append(lvl_mask_preds)\n        # dice loss\n        num_pos = 0\n        for img_pos_masks in pos_masks:\n            for lvl_img_pos_masks in img_pos_masks:\n                num_pos += lvl_img_pos_masks.count_nonzero()\n\n        loss_mask = []\n        for lvl_mask_preds, lvl_mask_targets in zip(mlvl_mask_preds,\n                                                    mlvl_mask_targets):\n            if lvl_mask_preds is None:\n                continue\n            loss_mask.append(\n                self.loss_mask(\n                    lvl_mask_preds,\n                    lvl_mask_targets,\n                    reduction_override='none'))\n        if num_pos > 0:\n            loss_mask = torch.cat(loss_mask).sum() / num_pos\n        else:\n            loss_mask = mask_feats.sum() * 0\n\n        # cate\n        flatten_labels = [\n            torch.cat(\n                [img_lvl_labels.flatten() for img_lvl_labels in lvl_labels])\n            for lvl_labels in zip(*labels)\n        ]\n        flatten_labels = torch.cat(flatten_labels)\n\n        flatten_cls_preds = [\n            lvl_cls_preds.permute(0, 2, 3, 1).reshape(-1, self.num_classes)\n            for lvl_cls_preds in mlvl_cls_preds\n        ]\n        flatten_cls_preds = torch.cat(flatten_cls_preds)\n\n        
loss_cls = self.loss_cls(\n            flatten_cls_preds, flatten_labels, avg_factor=num_pos + 1)\n        return dict(loss_mask=loss_mask, loss_cls=loss_cls)\n\n    @force_fp32(\n        apply_to=('mlvl_kernel_preds', 'mlvl_cls_scores', 'mask_feats'))\n    def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats,\n                    img_metas, **kwargs):\n        \"\"\"Get multi-image mask results.\n\n        Args:\n            mlvl_kernel_preds (list[Tensor]): Multi-level dynamic kernel\n                prediction. The kernel is used to generate instance\n                segmentation masks by dynamic convolution. Each element in the\n                list has shape\n                (batch_size, kernel_out_channels, num_grids, num_grids).\n            mlvl_cls_scores (list[Tensor]): Multi-level scores. Each element\n                in the list has shape\n                (batch_size, num_classes, num_grids, num_grids).\n            mask_feats (Tensor): Unified mask feature map used to generate\n                instance segmentation masks by dynamic convolution. Has shape\n                (batch_size, mask_out_channels, h, w).\n            img_metas (list[dict]): Meta information of all images.\n\n        Returns:\n            list[:obj:`InstanceData`]: Processed results of multiple\n            images.Each :obj:`InstanceData` usually contains\n            following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n        num_levels = len(mlvl_cls_scores)\n        assert len(mlvl_kernel_preds) == len(mlvl_cls_scores)\n\n        for lvl in range(num_levels):\n            cls_scores = mlvl_cls_scores[lvl]\n            cls_scores = cls_scores.sigmoid()\n            local_max = F.max_pool2d(cls_scores, 2, stride=1, padding=1)\n            keep_mask = local_max[:, :, :-1, :-1] == cls_scores\n            cls_scores = cls_scores * keep_mask\n            mlvl_cls_scores[lvl] = cls_scores.permute(0, 2, 3, 1)\n\n        result_list = []\n        for img_id in range(len(img_metas)):\n            img_cls_pred = [\n                mlvl_cls_scores[lvl][img_id].view(-1, self.cls_out_channels)\n                for lvl in range(num_levels)\n            ]\n            img_mask_feats = mask_feats[[img_id]]\n            img_kernel_pred = [\n                mlvl_kernel_preds[lvl][img_id].permute(1, 2, 0).view(\n                    -1, self.kernel_out_channels) for lvl in range(num_levels)\n            ]\n            img_cls_pred = torch.cat(img_cls_pred, dim=0)\n            img_kernel_pred = torch.cat(img_kernel_pred, dim=0)\n            result = self._get_results_single(\n                img_kernel_pred,\n                img_cls_pred,\n                img_mask_feats,\n                img_meta=img_metas[img_id])\n            result_list.append(result)\n        return result_list\n\n    def _get_results_single(self,\n                            kernel_preds,\n                            cls_scores,\n                            mask_feats,\n                            img_meta,\n                            cfg=None):\n        \"\"\"Get processed mask related results of single image.\n\n        Args:\n            kernel_preds (Tensor): Dynamic kernel prediction of all points\n                in single image, has shape\n                (num_points, 
kernel_out_channels).\n            cls_scores (Tensor): Classification score of all points\n                in single image, has shape (num_points, num_classes).\n            mask_feats (Tensor): Unified mask feature map of single image\n                used to generate instance segmentation masks by dynamic\n                convolution, has shape (1, mask_out_channels, feat_h, feat_w).\n            img_meta (dict): Meta information of corresponding image.\n            cfg (dict, optional): Config used in test phase.\n                Default: None.\n\n        Returns:\n            :obj:`InstanceData`: Processed results of single image.\n             It usually contains the following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instances,).\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n        \"\"\"\n\n        def empty_results(results, cls_scores):\n            \"\"\"Generate empty results.\"\"\"\n            results.scores = cls_scores.new_ones(0)\n            results.masks = cls_scores.new_zeros(0, *results.ori_shape[:2])\n            results.labels = cls_scores.new_ones(0)\n            return results\n\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(kernel_preds) == len(cls_scores)\n        results = InstanceData(img_meta)\n\n        featmap_size = mask_feats.size()[-2:]\n\n        img_shape = results.img_shape\n        ori_shape = results.ori_shape\n\n        # overall info\n        h, w, _ = img_shape\n        upsampled_size = (featmap_size[0] * self.mask_stride,\n                          featmap_size[1] * self.mask_stride)\n\n        # process.\n        score_mask = (cls_scores > cfg.score_thr)\n        cls_scores = cls_scores[score_mask]\n        if len(cls_scores) == 0:\n            return empty_results(results, cls_scores)\n\n        # cate_labels & kernel_preds\n        inds = score_mask.nonzero()\n        cls_labels = inds[:, 1]\n        kernel_preds = kernel_preds[inds[:, 0]]\n\n        # trans vector.\n        lvl_interval = cls_labels.new_tensor(self.num_grids).pow(2).cumsum(0)\n        strides = kernel_preds.new_ones(lvl_interval[-1])\n\n        strides[:lvl_interval[0]] *= self.strides[0]\n        for lvl in range(1, self.num_levels):\n            strides[lvl_interval[lvl -\n                                 1]:lvl_interval[lvl]] *= self.strides[lvl]\n        strides = strides[inds[:, 0]]\n\n        # mask encoding.\n        kernel_preds = kernel_preds.view(\n            kernel_preds.size(0), -1, self.dynamic_conv_size,\n            self.dynamic_conv_size)\n        mask_preds = F.conv2d(\n            mask_feats, kernel_preds, stride=1).squeeze(0).sigmoid()\n        # mask.\n        masks = mask_preds > cfg.mask_thr\n        sum_masks = masks.sum((1, 2)).float()\n        keep = sum_masks > strides\n        if keep.sum() == 0:\n            return empty_results(results, cls_scores)\n        masks = masks[keep]\n        mask_preds = mask_preds[keep]\n        sum_masks = sum_masks[keep]\n        cls_scores = cls_scores[keep]\n        cls_labels = cls_labels[keep]\n\n        # maskness.\n        mask_scores = (mask_preds * masks).sum((1, 2)) / sum_masks\n        cls_scores *= mask_scores\n\n        scores, labels, _, keep_inds = mask_matrix_nms(\n            masks,\n            cls_labels,\n            cls_scores,\n            mask_area=sum_masks,\n            nms_pre=cfg.nms_pre,\n            max_num=cfg.max_per_img,\n            kernel=cfg.kernel,\n            
sigma=cfg.sigma,\n            filter_thr=cfg.filter_thr)\n        mask_preds = mask_preds[keep_inds]\n        mask_preds = F.interpolate(\n            mask_preds.unsqueeze(0),\n            size=upsampled_size,\n            mode='bilinear',\n            align_corners=False)[:, :, :h, :w]\n        mask_preds = F.interpolate(\n            mask_preds,\n            size=ori_shape[:2],\n            mode='bilinear',\n            align_corners=False).squeeze(0)\n        masks = mask_preds > cfg.mask_thr\n\n        results.masks = masks\n        results.labels = labels\n        results.scores = scores\n\n        return results\n"
  },
  {
    "path": "mmdet/models/dense_heads/ssd_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (build_assigner, build_bbox_coder,\n                        build_prior_generator, build_sampler, multi_apply)\nfrom ..builder import HEADS\nfrom ..losses import smooth_l1_loss\nfrom .anchor_head import AnchorHead\n\n\n# TODO: add loss evaluator for SSD\n@HEADS.register_module()\nclass SSDHead(AnchorHead):\n    \"\"\"SSD head used in https://arxiv.org/abs/1512.02325.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        stacked_convs (int): Number of conv layers in cls and reg tower.\n            Default: 0.\n        feat_channels (int): Number of hidden channels when stacked_convs\n            > 0. Default: 256.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Default: False.\n        conv_cfg (dict): Dictionary to construct and config conv layer.\n            Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: None.\n        act_cfg (dict): Dictionary to construct and config activation layer.\n            Default: None.\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        reg_decoded_bbox (bool): If true, the regression loss would be\n            applied directly on decoded bounding boxes, converting both\n            the predicted boxes and regression targets to absolute\n            coordinates format. Default False. 
It should be `True` when\n            using `IoULoss`, `GIoULoss`, or `DIoULoss` in the bbox head.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_classes=80,\n                 in_channels=(512, 1024, 512, 256, 256, 256),\n                 stacked_convs=0,\n                 feat_channels=256,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 anchor_generator=dict(\n                     type='SSDAnchorGenerator',\n                     scale_major=False,\n                     input_size=300,\n                     strides=[8, 16, 32, 64, 100, 300],\n                     ratios=([2], [2, 3], [2, 3], [2, 3], [2], [2]),\n                     basesize_ratio_range=(0.1, 0.9)),\n                 bbox_coder=dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=[.0, .0, .0, .0],\n                     target_stds=[1.0, 1.0, 1.0, 1.0],\n                 ),\n                 reg_decoded_bbox=False,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier',\n                     layer='Conv2d',\n                     distribution='uniform',\n                     bias=0)):\n        super(AnchorHead, self).__init__(init_cfg)\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.stacked_convs = stacked_convs\n        self.feat_channels = feat_channels\n        self.use_depthwise = use_depthwise\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.cls_out_channels = num_classes + 1  # add background class\n        self.prior_generator = build_prior_generator(anchor_generator)\n\n        # Usually the numbers of anchors for each level are the same\n        # except SSD detectors. 
So it is an int in most dense\n        # heads but a list of ints in SSDHead\n        self.num_base_priors = self.prior_generator.num_base_priors\n\n        self._init_layers()\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.use_sigmoid_cls = False\n        self.cls_focal_loss = False\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        # set sampling=False for anchor_target\n        self.sampling = False\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # SSD sampling=False so use PseudoSampler\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.fp16_enabled = False\n\n    @property\n    def num_anchors(self):\n        \"\"\"\n        Returns:\n            list[int]: Number of base_anchors on each point of each level.\n        \"\"\"\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'please use \"num_base_priors\" instead')\n        return self.num_base_priors\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.cls_convs = nn.ModuleList()\n        self.reg_convs = nn.ModuleList()\n        # TODO: Use registry to choose ConvModule type\n        conv = DepthwiseSeparableConvModule \\\n            if self.use_depthwise else ConvModule\n\n        for channel, num_base_priors in zip(self.in_channels,\n                                            self.num_base_priors):\n            cls_layers = []\n            reg_layers = []\n            in_channel = channel\n            # build stacked conv tower, not used in default ssd\n            for i in range(self.stacked_convs):\n                cls_layers.append(\n                    conv(\n                        in_channel,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_layers.append(\n                    conv(\n                        in_channel,\n                        self.feat_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                in_channel = self.feat_channels\n            # SSD-Lite head\n            if self.use_depthwise:\n                cls_layers.append(\n                    ConvModule(\n                        in_channel,\n                        in_channel,\n                        3,\n                        padding=1,\n                        groups=in_channel,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n                reg_layers.append(\n                    ConvModule(\n                        in_channel,\n                        in_channel,\n                        3,\n                        padding=1,\n                        groups=in_channel,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg,\n                        act_cfg=self.act_cfg))\n            cls_layers.append(\n                nn.Conv2d(\n                    in_channel,\n                    num_base_priors * self.cls_out_channels,\n                    kernel_size=1 if self.use_depthwise else 3,\n                    padding=0 if self.use_depthwise else 1))\n            reg_layers.append(\n                nn.Conv2d(\n                    in_channel,\n                    num_base_priors * 4,\n                    kernel_size=1 if self.use_depthwise else 3,\n                    padding=0 if self.use_depthwise else 1))\n            self.cls_convs.append(nn.Sequential(*cls_layers))\n            self.reg_convs.append(nn.Sequential(*reg_layers))\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple:\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Box energies / deltas for all scale\n                    levels, each is a 4D-tensor, the channels number is\n                    num_anchors * 4.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for feat, reg_conv, cls_conv in zip(feats, self.reg_convs,\n                                            self.cls_convs):\n            cls_scores.append(cls_conv(feat))\n            bbox_preds.append(reg_conv(feat))\n        return cls_scores, bbox_preds\n\n    def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weights,\n                    bbox_targets, bbox_weights, num_total_samples):\n        \"\"\"Compute loss of a single image.\n\n        Args:\n            cls_score (Tensor): Box scores for each image,\n                has shape (num_total_anchors, num_classes).\n            bbox_pred (Tensor): Box energies / deltas for each image\n                level with shape (num_total_anchors, 4).\n            anchor (Tensor): Box reference for each scale level with shape\n                (num_total_anchors, 4).\n            labels (Tensor): Labels of each anchor with shape\n                (num_total_anchors,).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (num_total_anchors,).\n            bbox_targets (Tensor): BBox regression targets of each anchor\n                with shape (num_total_anchors, 4).\n            bbox_weights (Tensor): BBox regression loss weights of each anchor\n                with shape (num_total_anchors, 4).\n            num_total_samples (int): If sampling, num total samples equal to\n                the number of total anchors; otherwise, it is the number of\n                positive anchors.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        loss_cls_all = F.cross_entropy(\n            cls_score, labels, reduction='none') * label_weights\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(\n            as_tuple=False).reshape(-1)\n        neg_inds = (labels == self.num_classes).nonzero(\n            as_tuple=False).view(-1)\n\n        num_pos_samples = pos_inds.size(0)\n        num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples\n        if num_neg_samples > neg_inds.size(0):\n            num_neg_samples = neg_inds.size(0)\n        topk_loss_cls_neg, _ = 
loss_cls_all[neg_inds].topk(num_neg_samples)\n        loss_cls_pos = loss_cls_all[pos_inds].sum()\n        loss_cls_neg = topk_loss_cls_neg.sum()\n        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples\n\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            bbox_pred = self.bbox_coder.decode(anchor, bbox_pred)\n\n        loss_bbox = smooth_l1_loss(\n            bbox_pred,\n            bbox_targets,\n            bbox_weights,\n            beta=self.train_cfg.smoothl1_beta,\n            avg_factor=num_total_samples)\n        return loss_cls[None], loss_bbox\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=1,\n            unmap_outputs=True)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg) = cls_reg_targets\n\n        num_images = len(img_metas)\n        all_cls_scores = torch.cat([\n            s.permute(0, 2, 3, 1).reshape(\n                num_images, -1, self.cls_out_channels) for s in cls_scores\n        ], 1)\n        all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n        all_label_weights = torch.cat(label_weights_list,\n                                      -1).view(num_images, -1)\n        all_bbox_preds = torch.cat([\n            b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n            for b in bbox_preds\n        ], -2)\n        all_bbox_targets = torch.cat(bbox_targets_list,\n                                     -2).view(num_images, -1, 4)\n        all_bbox_weights = torch.cat(bbox_weights_list,\n                                     -2).view(num_images, -1, 4)\n\n       
 # concat all level anchors to a single tensor\n        all_anchors = []\n        for i in range(num_images):\n            all_anchors.append(torch.cat(anchor_list[i]))\n\n        losses_cls, losses_bbox = multi_apply(\n            self.loss_single,\n            all_cls_scores,\n            all_bbox_preds,\n            all_anchors,\n            all_labels,\n            all_label_weights,\n            all_bbox_targets,\n            all_bbox_weights,\n            num_total_samples=num_total_pos)\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n"
  },
  {
    "path": "mmdet/models/dense_heads/tood_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, Scale, bias_init_with_prob, normal_init\nfrom mmcv.ops import deform_conv2d\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (anchor_inside_flags, build_assigner, distance2bbox,\n                        images_to_levels, multi_apply, reduce_mean, unmap)\nfrom mmdet.core.utils import filter_scores_and_topk\nfrom mmdet.models.utils import sigmoid_geometric_mean\nfrom ..builder import HEADS, build_loss\nfrom .atss_head import ATSSHead\n\n\nclass TaskDecomposition(nn.Module):\n    \"\"\"Task decomposition module in task-aligned predictor of TOOD.\n\n    Args:\n        feat_channels (int): Number of feature channels in TOOD head.\n        stacked_convs (int): Number of conv layers in TOOD head.\n        la_down_rate (int): Downsample rate of layer attention.\n        conv_cfg (dict): Config dict for convolution layer.\n        norm_cfg (dict): Config dict for normalization layer.\n    \"\"\"\n\n    def __init__(self,\n                 feat_channels,\n                 stacked_convs,\n                 la_down_rate=8,\n                 conv_cfg=None,\n                 norm_cfg=None):\n        super(TaskDecomposition, self).__init__()\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.in_channels = self.feat_channels * self.stacked_convs\n        self.norm_cfg = norm_cfg\n        self.layer_attention = nn.Sequential(\n            nn.Conv2d(self.in_channels, self.in_channels // la_down_rate, 1),\n            nn.ReLU(inplace=True),\n            nn.Conv2d(\n                self.in_channels // la_down_rate,\n                self.stacked_convs,\n                1,\n                padding=0), nn.Sigmoid())\n\n        self.reduction_conv = ConvModule(\n            self.in_channels,\n            self.feat_channels,\n            1,\n            stride=1,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            bias=norm_cfg is None)\n\n    def init_weights(self):\n        for m in self.layer_attention.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.001)\n        normal_init(self.reduction_conv.conv, std=0.01)\n\n    def forward(self, feat, avg_feat=None):\n        b, c, h, w = feat.shape\n        if avg_feat is None:\n            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))\n        weight = self.layer_attention(avg_feat)\n\n        # here we first compute the product between layer attention weight and\n        # conv weight, and then compute the convolution between new conv weight\n        # and feature map, in order to save memory and FLOPs.\n        conv_weight = weight.reshape(\n            b, 1, self.stacked_convs,\n            1) * self.reduction_conv.conv.weight.reshape(\n                1, self.feat_channels, self.stacked_convs, self.feat_channels)\n        conv_weight = conv_weight.reshape(b, self.feat_channels,\n                                          self.in_channels)\n        feat = feat.reshape(b, self.in_channels, h * w)\n        feat = torch.bmm(conv_weight, feat).reshape(b, self.feat_channels, h,\n                                                    w)\n        if self.norm_cfg is not None:\n            feat = self.reduction_conv.norm(feat)\n        feat = self.reduction_conv.activate(feat)\n\n        return feat\n\n\n@HEADS.register_module()\nclass TOODHead(ATSSHead):\n    
\"\"\"TOODHead used in `TOOD: Task-aligned One-stage Object Detection.\n\n    <https://arxiv.org/abs/2108.07755>`_.\n\n    TOOD uses Task-aligned head (T-head) and is optimized by Task Alignment\n    Learning (TAL).\n\n    Args:\n        num_dcn (int): Number of deformable convolution in the head.\n            Default: 0.\n        anchor_type (str): If set to `anchor_free`, the head will use centers\n            to regress bboxes. If set to `anchor_based`, the head will\n            regress bboxes based on anchors. Default: `anchor_free`.\n        initial_loss_cls (dict): Config of initial loss.\n\n    Example:\n        >>> self = TOODHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred = self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 num_dcn=0,\n                 anchor_type='anchor_free',\n                 initial_loss_cls=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     activated=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 **kwargs):\n        assert anchor_type in ['anchor_free', 'anchor_based']\n        self.num_dcn = num_dcn\n        self.anchor_type = anchor_type\n        self.epoch = 0  # which would be update in SetEpochInfoHook!\n        super(TOODHead, self).__init__(num_classes, in_channels, **kwargs)\n\n        if self.train_cfg:\n            self.initial_epoch = self.train_cfg.initial_epoch\n            self.initial_assigner = build_assigner(\n                self.train_cfg.initial_assigner)\n            self.initial_loss_cls = build_loss(initial_loss_cls)\n            self.assigner = self.initial_assigner\n            self.alignment_assigner = build_assigner(self.train_cfg.assigner)\n            self.alpha = self.train_cfg.alpha\n            self.beta = self.train_cfg.beta\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.inter_convs = nn.ModuleList()\n        for i in range(self.stacked_convs):\n            if i < self.num_dcn:\n                conv_cfg = dict(type='DCNv2', deform_groups=4)\n            else:\n                conv_cfg = self.conv_cfg\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.inter_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg))\n\n        self.cls_decomp = TaskDecomposition(self.feat_channels,\n                                            self.stacked_convs,\n                                            self.stacked_convs * 8,\n                                            self.conv_cfg, self.norm_cfg)\n        self.reg_decomp = TaskDecomposition(self.feat_channels,\n                                            self.stacked_convs,\n                                            self.stacked_convs * 8,\n                                            self.conv_cfg, self.norm_cfg)\n\n        self.tood_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        
self.tood_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n\n        self.cls_prob_module = nn.Sequential(\n            nn.Conv2d(self.feat_channels * self.stacked_convs,\n                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),\n            nn.Conv2d(self.feat_channels // 4, 1, 3, padding=1))\n        self.reg_offset_module = nn.Sequential(\n            nn.Conv2d(self.feat_channels * self.stacked_convs,\n                      self.feat_channels // 4, 1), nn.ReLU(inplace=True),\n            nn.Conv2d(self.feat_channels // 4, 4 * 2, 3, padding=1))\n\n        self.scales = nn.ModuleList(\n            [Scale(1.0) for _ in self.prior_generator.strides])\n\n    def init_weights(self):\n        \"\"\"Initialize weights of the head.\"\"\"\n        bias_cls = bias_init_with_prob(0.01)\n        for m in self.inter_convs:\n            normal_init(m.conv, std=0.01)\n        for m in self.cls_prob_module:\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.01)\n        for m in self.reg_offset_module:\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, std=0.001)\n        normal_init(self.cls_prob_module[-1], std=0.01, bias=bias_cls)\n\n        self.cls_decomp.init_weights()\n        self.reg_decomp.init_weights()\n\n        normal_init(self.tood_cls, std=0.01, bias=bias_cls)\n        normal_init(self.tood_reg, std=0.01)\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple: Usually a tuple of classification scores and bbox\n                predictions.\n                cls_scores (list[Tensor]): Classification scores for all scale\n                    levels, each is a 4D-tensor, the number of channels is\n                    num_anchors * num_classes.\n                bbox_preds (list[Tensor]): Decoded box for all scale levels,\n                    each is a 4D-tensor, the number of channels is\n                    num_anchors * 4. 
In [tl_x, tl_y, br_x, br_y] format.\n        \"\"\"\n        cls_scores = []\n        bbox_preds = []\n        for idx, (x, scale, stride) in enumerate(\n                zip(feats, self.scales, self.prior_generator.strides)):\n            b, c, h, w = x.shape\n            anchor = self.prior_generator.single_level_grid_priors(\n                (h, w), idx, device=x.device)\n            anchor = torch.cat([anchor for _ in range(b)])\n            # extract task interactive features\n            inter_feats = []\n            for inter_conv in self.inter_convs:\n                x = inter_conv(x)\n                inter_feats.append(x)\n            feat = torch.cat(inter_feats, 1)\n\n            # task decomposition\n            avg_feat = F.adaptive_avg_pool2d(feat, (1, 1))\n            cls_feat = self.cls_decomp(feat, avg_feat)\n            reg_feat = self.reg_decomp(feat, avg_feat)\n\n            # cls prediction and alignment\n            cls_logits = self.tood_cls(cls_feat)\n            cls_prob = self.cls_prob_module(feat)\n            cls_score = sigmoid_geometric_mean(cls_logits, cls_prob)\n\n            # reg prediction and alignment\n            if self.anchor_type == 'anchor_free':\n                reg_dist = scale(self.tood_reg(reg_feat).exp()).float()\n                reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)\n                reg_bbox = distance2bbox(\n                    self.anchor_center(anchor) / stride[0],\n                    reg_dist).reshape(b, h, w, 4).permute(0, 3, 1,\n                                                          2)  # (b, c, h, w)\n            elif self.anchor_type == 'anchor_based':\n                reg_dist = scale(self.tood_reg(reg_feat)).float()\n                reg_dist = reg_dist.permute(0, 2, 3, 1).reshape(-1, 4)\n                reg_bbox = self.bbox_coder.decode(anchor, reg_dist).reshape(\n                    b, h, w, 4).permute(0, 3, 1, 2) / stride[0]\n            else:\n                raise NotImplementedError(\n                    f'Unknown anchor type: {self.anchor_type}.'\n                    f'Please use `anchor_free` or `anchor_based`.')\n            reg_offset = self.reg_offset_module(feat)\n            bbox_pred = self.deform_sampling(reg_bbox.contiguous(),\n                                             reg_offset.contiguous())\n\n            # After deform_sampling, some boxes will become invalid (The\n            # left-top point is at the right or bottom of the right-bottom\n            # point), which will make the GIoULoss negative.\n            invalid_bbox_idx = (bbox_pred[:, [0]] > bbox_pred[:, [2]]) | \\\n                               (bbox_pred[:, [1]] > bbox_pred[:, [3]])\n            invalid_bbox_idx = invalid_bbox_idx.expand_as(bbox_pred)\n            bbox_pred = torch.where(invalid_bbox_idx, reg_bbox, bbox_pred)\n\n            cls_scores.append(cls_score)\n            bbox_preds.append(bbox_pred)\n        return tuple(cls_scores), tuple(bbox_preds)\n\n    def deform_sampling(self, feat, offset):\n        \"\"\"Sampling the feature x according to offset.\n\n        Args:\n            feat (Tensor): Feature\n            offset (Tensor): Spatial offset for feature sampling\n        \"\"\"\n        # it is an equivalent implementation of bilinear interpolation\n        b, c, h, w = feat.shape\n        weight = feat.new_ones(c, 1, 1, 1)\n        y = deform_conv2d(feat, offset, weight, 1, 0, 1, c, c)\n        return y\n\n    def anchor_center(self, anchors):\n        \"\"\"Get anchor centers from anchors.\n\n        
Args:\n            anchors (Tensor): Anchor list with shape (N, 4), \"xyxy\" format.\n\n        Returns:\n            Tensor: Anchor centers with shape (N, 2), \"xy\" format.\n        \"\"\"\n        anchors_cx = (anchors[:, 2] + anchors[:, 0]) / 2\n        anchors_cy = (anchors[:, 3] + anchors[:, 1]) / 2\n        return torch.stack([anchors_cx, anchors_cy], dim=-1)\n\n    def loss_single(self, anchors, cls_score, bbox_pred, labels, label_weights,\n                    bbox_targets, alignment_metrics, stride):\n        \"\"\"Compute loss of a single scale level.\n\n        Args:\n            anchors (Tensor): Box reference for each scale level with shape\n                (N, num_total_anchors, 4).\n            cls_score (Tensor): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W).\n            bbox_pred (Tensor): Decoded bboxes for each scale\n                level with shape (N, num_anchors * 4, H, W).\n            labels (Tensor): Labels of each anchors with shape\n                (N, num_total_anchors).\n            label_weights (Tensor): Label weights of each anchor with shape\n                (N, num_total_anchors).\n            bbox_targets (Tensor): BBox regression targets of each anchor with\n                shape (N, num_total_anchors, 4).\n            alignment_metrics (Tensor): Alignment metrics with shape\n                (N, num_total_anchors).\n            stride (tuple[int]): Downsample stride of the feature map.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert stride[0] == stride[1], 'h stride is not equal to w stride!'\n        anchors = anchors.reshape(-1, 4)\n        cls_score = cls_score.permute(0, 2, 3, 1).reshape(\n            -1, self.cls_out_channels).contiguous()\n        bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4)\n        bbox_targets = bbox_targets.reshape(-1, 4)\n        labels = labels.reshape(-1)\n        alignment_metrics = alignment_metrics.reshape(-1)\n        label_weights = label_weights.reshape(-1)\n        targets = labels if self.epoch < self.initial_epoch else (\n            labels, alignment_metrics)\n        cls_loss_func = self.initial_loss_cls \\\n            if self.epoch < self.initial_epoch else self.loss_cls\n\n        loss_cls = cls_loss_func(\n            cls_score, targets, label_weights, avg_factor=1.0)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = ((labels >= 0)\n                    & (labels < bg_class_ind)).nonzero().squeeze(1)\n\n        if len(pos_inds) > 0:\n            pos_bbox_targets = bbox_targets[pos_inds]\n            pos_bbox_pred = bbox_pred[pos_inds]\n            pos_anchors = anchors[pos_inds]\n\n            pos_decode_bbox_pred = pos_bbox_pred\n            pos_decode_bbox_targets = pos_bbox_targets / stride[0]\n\n            # regression loss\n            pos_bbox_weight = self.centerness_target(\n                pos_anchors, pos_bbox_targets\n            ) if self.epoch < self.initial_epoch else alignment_metrics[\n                pos_inds]\n\n            loss_bbox = self.loss_bbox(\n                pos_decode_bbox_pred,\n                pos_decode_bbox_targets,\n                weight=pos_bbox_weight,\n                avg_factor=1.0)\n        else:\n            loss_bbox = bbox_pred.sum() * 0\n            pos_bbox_weight = bbox_targets.new_tensor(0.)\n\n        return loss_cls, loss_bbox, alignment_metrics.sum(\n   
     ), pos_bbox_weight.sum()\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Decoded box for each scale\n                level with shape (N, num_anchors * 4, H, W) in\n                [tl_x, tl_y, br_x, br_y] format.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (list[Tensor] | None): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(img_metas)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        flatten_cls_scores = torch.cat([\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ], 1)\n        flatten_bbox_preds = torch.cat([\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4) * stride[0]\n            for bbox_pred, stride in zip(bbox_preds,\n                                         self.prior_generator.strides)\n        ], 1)\n\n        cls_reg_targets = self.get_targets(\n            flatten_cls_scores,\n            flatten_bbox_preds,\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         alignment_metrics_list) = cls_reg_targets\n\n        losses_cls, losses_bbox,\\\n            cls_avg_factors, bbox_avg_factors = multi_apply(\n                self.loss_single,\n                anchor_list,\n                cls_scores,\n                bbox_preds,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                alignment_metrics_list,\n                self.prior_generator.strides)\n\n        cls_avg_factor = reduce_mean(sum(cls_avg_factors)).clamp_(min=1).item()\n        losses_cls = list(map(lambda x: x / cls_avg_factor, losses_cls))\n\n        bbox_avg_factor = reduce_mean(\n            sum(bbox_avg_factors)).clamp_(min=1).item()\n        losses_bbox = list(map(lambda x: x / bbox_avg_factor, losses_bbox))\n        return dict(loss_cls=losses_cls, loss_bbox=losses_bbox)\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                         
  bbox_pred_list,\n                           score_factor_list,\n                           mlvl_priors,\n                           img_meta,\n                           cfg,\n                           rescale=False,\n                           with_nms=True,\n                           **kwargs):\n        \"\"\"Transform outputs of a single image into bbox predictions.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores from all scale\n                levels of a single image, each item has shape\n                (num_priors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas from\n                all scale levels of a single image, each item has shape\n                (num_priors * 4, H, W).\n            score_factor_list (list[Tensor]): Score factor from all scale\n                levels of a single image, each item has shape\n                (num_priors * 1, H, W).\n            mlvl_priors (list[Tensor]): Each element in the list is\n                the priors of a single level in feature pyramid. In all\n                anchor-based methods, it has shape (num_priors, 4). In\n                all anchor-free methods, it has shape (num_priors, 2)\n                when `with_stride=True`, otherwise it still has shape\n                (num_priors, 4).\n            img_meta (dict): Image meta info.\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            tuple[Tensor]: Results of detected bboxes and labels. If with_nms\n                is False and mlvl_score_factor is None, return mlvl_bboxes and\n                mlvl_scores, else return mlvl_bboxes, mlvl_scores and\n                mlvl_score_factor. Usually with_nms is False is used for aug\n                test. If with_nms is True, then return the following format\n\n                - det_bboxes (Tensor): Predicted bboxes with shape \\\n                    [num_bboxes, 5], where the first 4 columns are bounding \\\n                    box positions (tl_x, tl_y, br_x, br_y) and the 5-th \\\n                    column are scores between 0 and 1.\n                - det_labels (Tensor): Predicted labels of the corresponding \\\n                    box with shape [num_bboxes].\n        \"\"\"\n\n        cfg = self.test_cfg if cfg is None else cfg\n        nms_pre = cfg.get('nms_pre', -1)\n\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_labels = []\n        for cls_score, bbox_pred, priors, stride in zip(\n                cls_score_list, bbox_pred_list, mlvl_priors,\n                self.prior_generator.strides):\n\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4) * stride[0]\n            scores = cls_score.permute(1, 2,\n                                       0).reshape(-1, self.cls_out_channels)\n\n            # After https://github.com/open-mmlab/mmdetection/pull/6268/,\n            # this operation keeps fewer bboxes under the same `nms_pre`.\n            # There is no difference in performance for most models. 
If you\n            # find a slight drop in performance, you can set a larger\n            # `nms_pre` than before.\n            results = filter_scores_and_topk(\n                scores, cfg.score_thr, nms_pre,\n                dict(bbox_pred=bbox_pred, priors=priors))\n            scores, labels, keep_idxs, filtered_results = results\n\n            bboxes = filtered_results['bbox_pred']\n\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_labels.append(labels)\n\n        return self._bbox_post_process(mlvl_scores, mlvl_labels, mlvl_bboxes,\n                                       img_meta['scale_factor'], cfg, rescale,\n                                       with_nms, None, **kwargs)\n\n    def get_targets(self,\n                    cls_scores,\n                    bbox_preds,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            cls_scores (Tensor): Classification predictions of images,\n                a 3D-Tensor with shape [num_imgs, num_priors, num_classes].\n            bbox_preds (Tensor): Decoded bboxes predictions of one image,\n                a 3D-Tensor with shape [num_imgs, num_priors, 4] in [tl_x,\n                tl_y, br_x, br_y] format.\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_anchors, 4).\n            valid_flag_list (list[list[Tensor]]): Multi level valid flags of\n                each image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. 
Each element of\n                the inner list is a tensor of shape (num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: a tuple containing learning targets.\n\n                - anchors_list (list[list[Tensor]]): Anchors of each level.\n                - labels_list (list[Tensor]): Labels of each level.\n                - label_weights_list (list[Tensor]): Label weights of each\n                  level.\n                - bbox_targets_list (list[Tensor]): BBox targets of each level.\n                - norm_alignment_metrics_list (list[Tensor]): Normalized\n                  alignment metrics of each level.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n        num_level_anchors_list = [num_level_anchors] * num_imgs\n\n        # concat all level anchors and flags to a single tensor\n        for i in range(num_imgs):\n            assert len(anchor_list[i]) == len(valid_flag_list[i])\n            anchor_list[i] = torch.cat(anchor_list[i])\n            valid_flag_list[i] = torch.cat(valid_flag_list[i])\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        # anchor_list: list(b * [-1, 4])\n\n        if self.epoch < self.initial_epoch:\n            (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n             all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\n                 super()._get_target_single,\n                 anchor_list,\n                 valid_flag_list,\n                 num_level_anchors_list,\n                 gt_bboxes_list,\n                 gt_bboxes_ignore_list,\n                 gt_labels_list,\n                 img_metas,\n                 label_channels=label_channels,\n                 unmap_outputs=unmap_outputs)\n            all_assign_metrics = [\n                weight[..., 0] for weight in all_bbox_weights\n            ]\n        else:\n            (all_anchors, all_labels, all_label_weights, all_bbox_targets,\n             all_assign_metrics) = multi_apply(\n                 self._get_target_single,\n                 cls_scores,\n                 bbox_preds,\n                 anchor_list,\n                 valid_flag_list,\n                 gt_bboxes_list,\n                 gt_bboxes_ignore_list,\n                 gt_labels_list,\n                 img_metas,\n                 label_channels=label_channels,\n                 unmap_outputs=unmap_outputs)\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n\n        # split targets to a list w.r.t. 
multiple levels\n        anchors_list = images_to_levels(all_anchors, num_level_anchors)\n        labels_list = images_to_levels(all_labels, num_level_anchors)\n        label_weights_list = images_to_levels(all_label_weights,\n                                              num_level_anchors)\n        bbox_targets_list = images_to_levels(all_bbox_targets,\n                                             num_level_anchors)\n        norm_alignment_metrics_list = images_to_levels(all_assign_metrics,\n                                                       num_level_anchors)\n\n        return (anchors_list, labels_list, label_weights_list,\n                bbox_targets_list, norm_alignment_metrics_list)\n\n    def _get_target_single(self,\n                           cls_scores,\n                           bbox_preds,\n                           flat_anchors,\n                           valid_flags,\n                           gt_bboxes,\n                           gt_bboxes_ignore,\n                           gt_labels,\n                           img_meta,\n                           label_channels=1,\n                           unmap_outputs=True):\n        \"\"\"Compute regression, classification targets for anchors in a single\n        image.\n\n        Args:\n            cls_scores (list(Tensor)): Box scores for each image.\n            bbox_preds (list(Tensor)): Box energies / deltas for each image.\n            flat_anchors (Tensor): Multi-level anchors of the image, which are\n                concatenated into a single tensor of shape (num_anchors ,4)\n            valid_flags (Tensor): Multi level valid flags of the image,\n                which are concatenated into a single tensor of\n                    shape (num_anchors,).\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            img_meta (dict): Meta info of the image.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: N is the number of total anchors in the image.\n                anchors (Tensor): All anchors in the image with shape (N, 4).\n                labels (Tensor): Labels of all anchors in the image with shape\n                    (N,).\n                label_weights (Tensor): Label weights of all anchor in the\n                    image with shape (N,).\n                bbox_targets (Tensor): BBox targets of all anchors in the\n                    image with shape (N, 4).\n                norm_alignment_metrics (Tensor): Normalized alignment metrics\n                    of all priors in the image with shape (N,).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 7\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n        assign_result = self.alignment_assigner.assign(\n            cls_scores[inside_flags, :], bbox_preds[inside_flags, :], anchors,\n            gt_bboxes, gt_bboxes_ignore, gt_labels, self.alpha, 
self.beta)\n        assign_ious = assign_result.max_overlaps\n        assign_metrics = assign_result.assign_metrics\n\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n\n        num_valid_anchors = anchors.shape[0]\n        bbox_targets = torch.zeros_like(anchors)\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n        norm_alignment_metrics = anchors.new_zeros(\n            num_valid_anchors, dtype=torch.float)\n\n        pos_inds = sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            # point-based\n            pos_bbox_targets = sampling_result.pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class since v2.5.0\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        class_assigned_gt_inds = torch.unique(\n            sampling_result.pos_assigned_gt_inds)\n        for gt_inds in class_assigned_gt_inds:\n            gt_class_inds = pos_inds[sampling_result.pos_assigned_gt_inds ==\n                                     gt_inds]\n            pos_alignment_metrics = assign_metrics[gt_class_inds]\n            pos_ious = assign_ious[gt_class_inds]\n            pos_norm_alignment_metrics = pos_alignment_metrics / (\n                pos_alignment_metrics.max() + 10e-8) * pos_ious.max()\n            norm_alignment_metrics[gt_class_inds] = pos_norm_alignment_metrics\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            anchors = unmap(anchors, num_total_anchors, inside_flags)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags, fill=self.num_classes)\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n            bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)\n            norm_alignment_metrics = unmap(norm_alignment_metrics,\n                                           num_total_anchors, inside_flags)\n        return (anchors, labels, label_weights, bbox_targets,\n                norm_alignment_metrics)\n"
  },
  {
    "path": "mmdet/models/dense_heads/vfnet_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scale\nfrom mmcv.ops import DeformConv2d\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (MlvlPointGenerator, bbox_overlaps, build_assigner,\n                        build_prior_generator, build_sampler, multi_apply,\n                        reduce_mean)\nfrom ..builder import HEADS, build_loss\nfrom .atss_head import ATSSHead\nfrom .fcos_head import FCOSHead\n\nINF = 1e8\n\n\n@HEADS.register_module()\nclass VFNetHead(ATSSHead, FCOSHead):\n    \"\"\"Head of `VarifocalNet (VFNet): An IoU-aware Dense Object\n    Detector.<https://arxiv.org/abs/2008.13367>`_.\n\n    The VFNet predicts IoU-aware classification scores which mix the\n    object presence confidence and object localization accuracy as the\n    detection score. It is built on the FCOS architecture and uses ATSS\n    for defining positive/negative training examples. The VFNet is trained\n    with Varifocal Loss and empolys star-shaped deformable convolution to\n    extract features for a bbox.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        regress_ranges (tuple[tuple[int, int]]): Regress range of multiple\n            level points.\n        center_sampling (bool): If true, use center sampling. Default: False.\n        center_sample_radius (float): Radius of center sampling. Default: 1.5.\n        sync_num_pos (bool): If true, synchronize the number of positive\n            examples across GPUs. Default: True\n        gradient_mul (float): The multiplier to gradients from bbox refinement\n            and recognition. Default: 0.1.\n        bbox_norm_type (str): The bbox normalization type, 'reg_denom' or\n            'stride'. Default: reg_denom\n        loss_cls_fl (dict): Config of focal loss.\n        use_vfl (bool): If true, use varifocal loss for training.\n            Default: True.\n        loss_cls (dict): Config of varifocal loss.\n        loss_bbox (dict): Config of localization loss, GIoU Loss.\n        loss_bbox (dict): Config of localization refinement loss, GIoU Loss.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: norm_cfg=dict(type='GN', num_groups=32,\n            requires_grad=True).\n        use_atss (bool): If true, use ATSS to define positive/negative\n            examples. 
Default: True.\n        anchor_generator (dict): Config of anchor generator for ATSS.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n\n    Example:\n        >>> self = VFNetHead(11, 7)\n        >>> feats = [torch.rand(1, 7, s, s) for s in [4, 8, 16, 32, 64]]\n        >>> cls_score, bbox_pred, bbox_pred_refine= self.forward(feats)\n        >>> assert len(cls_score) == len(self.scales)\n    \"\"\"  # noqa: E501\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 regress_ranges=((-1, 64), (64, 128), (128, 256), (256, 512),\n                                 (512, INF)),\n                 center_sampling=False,\n                 center_sample_radius=1.5,\n                 sync_num_pos=True,\n                 gradient_mul=0.1,\n                 bbox_norm_type='reg_denom',\n                 loss_cls_fl=dict(\n                     type='FocalLoss',\n                     use_sigmoid=True,\n                     gamma=2.0,\n                     alpha=0.25,\n                     loss_weight=1.0),\n                 use_vfl=True,\n                 loss_cls=dict(\n                     type='VarifocalLoss',\n                     use_sigmoid=True,\n                     alpha=0.75,\n                     gamma=2.0,\n                     iou_weighted=True,\n                     loss_weight=1.0),\n                 loss_bbox=dict(type='GIoULoss', loss_weight=1.5),\n                 loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0),\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 use_atss=True,\n                 reg_decoded_bbox=True,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     ratios=[1.0],\n                     octave_base_scale=8,\n                     scales_per_octave=1,\n                     center_offset=0.0,\n                     strides=[8, 16, 32, 64, 128]),\n                 init_cfg=dict(\n                     type='Normal',\n                     layer='Conv2d',\n                     std=0.01,\n                     override=dict(\n                         type='Normal',\n                         name='vfnet_cls',\n                         std=0.01,\n                         bias_prob=0.01)),\n                 **kwargs):\n        # dcn base offsets, adapted from reppoints_head.py\n        self.num_dconv_points = 9\n        self.dcn_kernel = int(np.sqrt(self.num_dconv_points))\n        self.dcn_pad = int((self.dcn_kernel - 1) / 2)\n        dcn_base = np.arange(-self.dcn_pad,\n                             self.dcn_pad + 1).astype(np.float64)\n        dcn_base_y = np.repeat(dcn_base, self.dcn_kernel)\n        dcn_base_x = np.tile(dcn_base, self.dcn_kernel)\n        dcn_base_offset = np.stack([dcn_base_y, dcn_base_x], axis=1).reshape(\n            (-1))\n        self.dcn_base_offset = torch.tensor(dcn_base_offset).view(1, -1, 1, 1)\n\n        super(FCOSHead, self).__init__(\n            num_classes,\n            in_channels,\n            norm_cfg=norm_cfg,\n            init_cfg=init_cfg,\n            **kwargs)\n        self.regress_ranges = regress_ranges\n        self.reg_denoms = [\n            regress_range[-1] for regress_range in regress_ranges\n        ]\n        self.reg_denoms[-1] = self.reg_denoms[-2] * 2\n        self.center_sampling = center_sampling\n        self.center_sample_radius = center_sample_radius\n        self.sync_num_pos = sync_num_pos\n        self.bbox_norm_type = 
bbox_norm_type\n        self.gradient_mul = gradient_mul\n        self.use_vfl = use_vfl\n        if self.use_vfl:\n            self.loss_cls = build_loss(loss_cls)\n        else:\n            self.loss_cls = build_loss(loss_cls_fl)\n        self.loss_bbox = build_loss(loss_bbox)\n        self.loss_bbox_refine = build_loss(loss_bbox_refine)\n\n        # for getting ATSS targets\n        self.use_atss = use_atss\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.use_sigmoid_cls = loss_cls.get('use_sigmoid', False)\n\n        self.anchor_center_offset = anchor_generator['center_offset']\n\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n\n        self.sampling = False\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        # only be used in `get_atss_targets` when `use_atss` is True\n        self.atss_prior_generator = build_prior_generator(anchor_generator)\n\n        self.fcos_prior_generator = MlvlPointGenerator(\n            anchor_generator['strides'],\n            self.anchor_center_offset if self.use_atss else 0.5)\n\n        # In order to reuse the `get_bboxes` in `BaseDenseHead.\n        # Only be used in testing phase.\n        self.prior_generator = self.fcos_prior_generator\n\n    @property\n    def num_anchors(self):\n        \"\"\"\n        Returns:\n            int: Number of anchors on each point of feature map.\n        \"\"\"\n        warnings.warn('DeprecationWarning: `num_anchors` is deprecated, '\n                      'please use \"num_base_priors\" instead')\n        return self.num_base_priors\n\n    @property\n    def anchor_generator(self):\n        warnings.warn('DeprecationWarning: anchor_generator is deprecated, '\n                      'please use \"atss_prior_generator\" instead')\n        return self.prior_generator\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        super(FCOSHead, self)._init_cls_convs()\n        super(FCOSHead, self)._init_reg_convs()\n        self.relu = nn.ReLU(inplace=True)\n        self.vfnet_reg_conv = ConvModule(\n            self.feat_channels,\n            self.feat_channels,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg,\n            bias=self.conv_bias)\n        self.vfnet_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n        self.vfnet_reg_refine_dconv = DeformConv2d(\n            self.feat_channels,\n            self.feat_channels,\n            self.dcn_kernel,\n            1,\n            padding=self.dcn_pad)\n        self.vfnet_reg_refine = nn.Conv2d(self.feat_channels, 4, 3, padding=1)\n        self.scales_refine = nn.ModuleList([Scale(1.0) for _ in self.strides])\n\n        self.vfnet_cls_dconv = DeformConv2d(\n            self.feat_channels,\n            self.feat_channels,\n            self.dcn_kernel,\n            1,\n            padding=self.dcn_pad)\n        self.vfnet_cls = nn.Conv2d(\n            self.feat_channels, self.cls_out_channels, 3, padding=1)\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            
tuple:\n                cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                    level, each is a 4D-tensor, the channel number is\n                    num_points * num_classes.\n                bbox_preds (list[Tensor]): Box offsets for each\n                    scale level, each is a 4D-tensor, the channel number is\n                    num_points * 4.\n                bbox_preds_refine (list[Tensor]): Refined Box offsets for\n                    each scale level, each is a 4D-tensor, the channel\n                    number is num_points * 4.\n        \"\"\"\n        return multi_apply(self.forward_single, feats, self.scales,\n                           self.scales_refine, self.strides, self.reg_denoms)\n\n    def forward_single(self, x, scale, scale_refine, stride, reg_denom):\n        \"\"\"Forward features of a single scale level.\n\n        Args:\n            x (Tensor): FPN feature maps of the specified stride.\n            scale (:obj: `mmcv.cnn.Scale`): Learnable scale module to resize\n                the bbox prediction.\n            scale_refine (:obj: `mmcv.cnn.Scale`): Learnable scale module to\n                resize the refined bbox prediction.\n            stride (int): The corresponding stride for feature maps,\n                used to normalize the bbox prediction when\n                bbox_norm_type = 'stride'.\n            reg_denom (int): The corresponding regression range for feature\n                maps, only used to normalize the bbox prediction when\n                bbox_norm_type = 'reg_denom'.\n\n        Returns:\n            tuple: iou-aware cls scores for each box, bbox predictions and\n                refined bbox predictions of input feature maps.\n        \"\"\"\n        cls_feat = x\n        reg_feat = x\n\n        for cls_layer in self.cls_convs:\n            cls_feat = cls_layer(cls_feat)\n\n        for reg_layer in self.reg_convs:\n            reg_feat = reg_layer(reg_feat)\n\n        # predict the bbox_pred of different level\n        reg_feat_init = self.vfnet_reg_conv(reg_feat)\n        if self.bbox_norm_type == 'reg_denom':\n            bbox_pred = scale(\n                self.vfnet_reg(reg_feat_init)).float().exp() * reg_denom\n        elif self.bbox_norm_type == 'stride':\n            bbox_pred = scale(\n                self.vfnet_reg(reg_feat_init)).float().exp() * stride\n        else:\n            raise NotImplementedError\n\n        # compute star deformable convolution offsets\n        # converting dcn_offset to reg_feat.dtype thus VFNet can be\n        # trained with FP16\n        dcn_offset = self.star_dcn_offset(bbox_pred, self.gradient_mul,\n                                          stride).to(reg_feat.dtype)\n\n        # refine the bbox_pred\n        reg_feat = self.relu(self.vfnet_reg_refine_dconv(reg_feat, dcn_offset))\n        bbox_pred_refine = scale_refine(\n            self.vfnet_reg_refine(reg_feat)).float().exp()\n        bbox_pred_refine = bbox_pred_refine * bbox_pred.detach()\n\n        # predict the iou-aware cls score\n        cls_feat = self.relu(self.vfnet_cls_dconv(cls_feat, dcn_offset))\n        cls_score = self.vfnet_cls(cls_feat)\n\n        if self.training:\n            return cls_score, bbox_pred, bbox_pred_refine\n        else:\n            return cls_score, bbox_pred_refine\n\n    def star_dcn_offset(self, bbox_pred, gradient_mul, stride):\n        \"\"\"Compute the star deformable conv offsets.\n\n        Args:\n            bbox_pred (Tensor): Predicted bbox distance offsets (l, r, t, 
b).\n            gradient_mul (float): Gradient multiplier.\n            stride (int): The corresponding stride for feature maps,\n                used to project the bbox onto the feature map.\n\n        Returns:\n            dcn_offsets (Tensor): The offsets for deformable convolution.\n        \"\"\"\n        dcn_base_offset = self.dcn_base_offset.type_as(bbox_pred)\n        bbox_pred_grad_mul = (1 - gradient_mul) * bbox_pred.detach() + \\\n            gradient_mul * bbox_pred\n        # map to the feature map scale\n        bbox_pred_grad_mul = bbox_pred_grad_mul / stride\n        N, C, H, W = bbox_pred.size()\n\n        x1 = bbox_pred_grad_mul[:, 0, :, :]\n        y1 = bbox_pred_grad_mul[:, 1, :, :]\n        x2 = bbox_pred_grad_mul[:, 2, :, :]\n        y2 = bbox_pred_grad_mul[:, 3, :, :]\n        bbox_pred_grad_mul_offset = bbox_pred.new_zeros(\n            N, 2 * self.num_dconv_points, H, W)\n        bbox_pred_grad_mul_offset[:, 0, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 1, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 2, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 4, :, :] = -1.0 * y1  # -y1\n        bbox_pred_grad_mul_offset[:, 5, :, :] = x2  # x2\n        bbox_pred_grad_mul_offset[:, 7, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 11, :, :] = x2  # x2\n        bbox_pred_grad_mul_offset[:, 12, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 13, :, :] = -1.0 * x1  # -x1\n        bbox_pred_grad_mul_offset[:, 14, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 16, :, :] = y2  # y2\n        bbox_pred_grad_mul_offset[:, 17, :, :] = x2  # x2\n        dcn_offset = bbox_pred_grad_mul_offset - dcn_base_offset\n\n        return dcn_offset\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'bbox_preds_refine'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             bbox_preds_refine,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_points * num_classes.\n            bbox_preds (list[Tensor]): Box offsets for each\n                scale level, each is a 4D-tensor, the channel number is\n                num_points * 4.\n            bbox_preds_refine (list[Tensor]): Refined Box offsets for\n                each scale level, each is a 4D-tensor, the channel\n                number is num_points * 4.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n                Default: None.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(bbox_preds_refine)\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        all_level_points = self.fcos_prior_generator.grid_priors(\n            featmap_sizes, bbox_preds[0].dtype, 
bbox_preds[0].device)\n        labels, label_weights, bbox_targets, bbox_weights = self.get_targets(\n            cls_scores, all_level_points, gt_bboxes, gt_labels, img_metas,\n            gt_bboxes_ignore)\n\n        num_imgs = cls_scores[0].size(0)\n        # flatten cls_scores, bbox_preds and bbox_preds_refine\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3,\n                              1).reshape(-1,\n                                         self.cls_out_channels).contiguous()\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n            for bbox_pred in bbox_preds\n        ]\n        flatten_bbox_preds_refine = [\n            bbox_pred_refine.permute(0, 2, 3, 1).reshape(-1, 4).contiguous()\n            for bbox_pred_refine in bbox_preds_refine\n        ]\n        flatten_cls_scores = torch.cat(flatten_cls_scores)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds)\n        flatten_bbox_preds_refine = torch.cat(flatten_bbox_preds_refine)\n        flatten_labels = torch.cat(labels)\n        flatten_bbox_targets = torch.cat(bbox_targets)\n        # repeat points to align with bbox_preds\n        flatten_points = torch.cat(\n            [points.repeat(num_imgs, 1) for points in all_level_points])\n\n        # FG cat_id: [0, num_classes - 1], BG cat_id: num_classes\n        bg_class_ind = self.num_classes\n        pos_inds = torch.where(\n            ((flatten_labels >= 0) & (flatten_labels < bg_class_ind)) > 0)[0]\n        num_pos = len(pos_inds)\n\n        pos_bbox_preds = flatten_bbox_preds[pos_inds]\n        pos_bbox_preds_refine = flatten_bbox_preds_refine[pos_inds]\n        pos_labels = flatten_labels[pos_inds]\n\n        # sync num_pos across all gpus\n        if self.sync_num_pos:\n            num_pos_avg_per_gpu = reduce_mean(\n                pos_inds.new_tensor(num_pos).float()).item()\n            num_pos_avg_per_gpu = max(num_pos_avg_per_gpu, 1.0)\n        else:\n            num_pos_avg_per_gpu = num_pos\n\n        pos_bbox_targets = flatten_bbox_targets[pos_inds]\n        pos_points = flatten_points[pos_inds]\n\n        pos_decoded_bbox_preds = self.bbox_coder.decode(\n            pos_points, pos_bbox_preds)\n        pos_decoded_target_preds = self.bbox_coder.decode(\n            pos_points, pos_bbox_targets)\n        iou_targets_ini = bbox_overlaps(\n            pos_decoded_bbox_preds,\n            pos_decoded_target_preds.detach(),\n            is_aligned=True).clamp(min=1e-6)\n        bbox_weights_ini = iou_targets_ini.clone().detach()\n        bbox_avg_factor_ini = reduce_mean(\n            bbox_weights_ini.sum()).clamp_(min=1).item()\n\n        pos_decoded_bbox_preds_refine = \\\n            self.bbox_coder.decode(pos_points, pos_bbox_preds_refine)\n        iou_targets_rf = bbox_overlaps(\n            pos_decoded_bbox_preds_refine,\n            pos_decoded_target_preds.detach(),\n            is_aligned=True).clamp(min=1e-6)\n        bbox_weights_rf = iou_targets_rf.clone().detach()\n        bbox_avg_factor_rf = reduce_mean(\n            bbox_weights_rf.sum()).clamp_(min=1).item()\n\n        if num_pos > 0:\n            loss_bbox = self.loss_bbox(\n                pos_decoded_bbox_preds,\n                pos_decoded_target_preds.detach(),\n                weight=bbox_weights_ini,\n                avg_factor=bbox_avg_factor_ini)\n\n            loss_bbox_refine = self.loss_bbox_refine(\n                pos_decoded_bbox_preds_refine,\n   
             pos_decoded_target_preds.detach(),\n                weight=bbox_weights_rf,\n                avg_factor=bbox_avg_factor_rf)\n\n            # build IoU-aware cls_score targets\n            if self.use_vfl:\n                pos_ious = iou_targets_rf.clone().detach()\n                cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n                cls_iou_targets[pos_inds, pos_labels] = pos_ious\n        else:\n            loss_bbox = pos_bbox_preds.sum() * 0\n            loss_bbox_refine = pos_bbox_preds_refine.sum() * 0\n            if self.use_vfl:\n                cls_iou_targets = torch.zeros_like(flatten_cls_scores)\n\n        if self.use_vfl:\n            loss_cls = self.loss_cls(\n                flatten_cls_scores,\n                cls_iou_targets,\n                avg_factor=num_pos_avg_per_gpu)\n        else:\n            loss_cls = self.loss_cls(\n                flatten_cls_scores,\n                flatten_labels,\n                weight=label_weights,\n                avg_factor=num_pos_avg_per_gpu)\n\n        return dict(\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            loss_bbox_rf=loss_bbox_refine)\n\n    def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,\n                    img_metas, gt_bboxes_ignore):\n        \"\"\"A wrapper for computing ATSS and FCOS targets for points in multiple\n        images.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level with shape (N, num_points * num_classes, H, W).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each box,\n                each has shape (num_gt,).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n\n        Returns:\n            tuple:\n                labels_list (list[Tensor]): Labels of each level.\n                label_weights (Tensor/None): Label weights of all levels.\n                bbox_targets_list (list[Tensor]): Regression targets of each\n                    level, (l, t, r, b).\n                bbox_weights (Tensor/None): Bbox weights of all levels.\n        \"\"\"\n        if self.use_atss:\n            return self.get_atss_targets(cls_scores, mlvl_points, gt_bboxes,\n                                         gt_labels, img_metas,\n                                         gt_bboxes_ignore)\n        else:\n            self.norm_on_bbox = False\n            return self.get_fcos_targets(mlvl_points, gt_bboxes, gt_labels)\n\n    def _get_target_single(self, *args, **kwargs):\n        \"\"\"Avoid ambiguity in multiple inheritance.\"\"\"\n        if self.use_atss:\n            return ATSSHead._get_target_single(self, *args, **kwargs)\n        else:\n            return FCOSHead._get_target_single(self, *args, **kwargs)\n\n    def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):\n        \"\"\"Compute FCOS regression and classification targets for points in\n        multiple images.\n\n        Args:\n            points (list[Tensor]): Points of each fpn level, each has shape\n                (num_points, 2).\n            
gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n            gt_labels_list (list[Tensor]): Ground truth labels of each box,\n                each has shape (num_gt,).\n\n        Returns:\n            tuple:\n                labels (list[Tensor]): Labels of each level.\n                label_weights: None, to be compatible with ATSS targets.\n                bbox_targets (list[Tensor]): BBox targets of each level.\n                bbox_weights: None, to be compatible with ATSS targets.\n        \"\"\"\n        labels, bbox_targets = FCOSHead.get_targets(self, points,\n                                                    gt_bboxes_list,\n                                                    gt_labels_list)\n        label_weights = None\n        bbox_weights = None\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_anchors(self, featmap_sizes, img_metas, device='cuda'):\n        \"\"\"Get anchors according to feature map sizes.\n\n        Args:\n            featmap_sizes (list[tuple]): Multi-level feature map sizes.\n            img_metas (list[dict]): Image meta info.\n            device (torch.device | str): Device for returned tensors\n\n        Returns:\n            tuple:\n                anchor_list (list[Tensor]): Anchors of each image.\n                valid_flag_list (list[Tensor]): Valid flags of each image.\n        \"\"\"\n        num_imgs = len(img_metas)\n\n        # since feature map sizes of all images are the same, we only compute\n        # anchors for one time\n        multi_level_anchors = self.atss_prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [multi_level_anchors for _ in range(num_imgs)]\n\n        # for each image, we compute valid flags of multi level anchors\n        valid_flag_list = []\n        for img_id, img_meta in enumerate(img_metas):\n            multi_level_flags = self.atss_prior_generator.valid_flags(\n                featmap_sizes, img_meta['pad_shape'], device=device)\n            valid_flag_list.append(multi_level_flags)\n\n        return anchor_list, valid_flag_list\n\n    def get_atss_targets(self,\n                         cls_scores,\n                         mlvl_points,\n                         gt_bboxes,\n                         gt_labels,\n                         img_metas,\n                         gt_bboxes_ignore=None):\n        \"\"\"A wrapper for computing ATSS targets for points in multiple images.\n\n        Args:\n            cls_scores (list[Tensor]): Box iou-aware scores for each scale\n                level with shape (N, num_points * num_classes, H, W).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            gt_bboxes (list[Tensor]): Ground truth bboxes of each image,\n                each has shape (num_gt, 4).\n            gt_labels (list[Tensor]): Ground truth labels of each box,\n                each has shape (num_gt,).\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4). 
Default: None.\n\n        Returns:\n            tuple:\n                labels_list (list[Tensor]): Labels of each level.\n                label_weights (Tensor): Label weights of all levels.\n                bbox_targets_list (list[Tensor]): Regression targets of each\n                    level, (l, t, r, b).\n                bbox_weights (Tensor): Bbox weights of all levels.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(\n            featmap_sizes\n        ) == self.atss_prior_generator.num_levels == \\\n            self.fcos_prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n\n        cls_reg_targets = ATSSHead.get_targets(\n            self,\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n            unmap_outputs=True)\n        if cls_reg_targets is None:\n            return None\n\n        (anchor_list, labels_list, label_weights_list, bbox_targets_list,\n         bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\n\n        bbox_targets_list = [\n            bbox_targets.reshape(-1, 4) for bbox_targets in bbox_targets_list\n        ]\n\n        num_imgs = len(img_metas)\n        # transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format\n        bbox_targets_list = self.transform_bbox_targets(\n            bbox_targets_list, mlvl_points, num_imgs)\n\n        labels_list = [labels.reshape(-1) for labels in labels_list]\n        label_weights_list = [\n            label_weights.reshape(-1) for label_weights in label_weights_list\n        ]\n        bbox_weights_list = [\n            bbox_weights.reshape(-1) for bbox_weights in bbox_weights_list\n        ]\n        label_weights = torch.cat(label_weights_list)\n        bbox_weights = torch.cat(bbox_weights_list)\n        return labels_list, label_weights, bbox_targets_list, bbox_weights\n\n    def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):\n        \"\"\"Transform bbox_targets (x1, y1, x2, y2) into (l, t, r, b) format.\n\n        Args:\n            decoded_bboxes (list[Tensor]): Regression targets of each level,\n                in the form of (x1, y1, x2, y2).\n            mlvl_points (list[Tensor]): Points of each fpn level, each has\n                shape (num_points, 2).\n            num_imgs (int): the number of images in a batch.\n\n        Returns:\n            bbox_targets (list[Tensor]): Regression targets of each level in\n                the form of (l, t, r, b).\n        \"\"\"\n        # TODO: Re-implemented in Class PointCoder\n        assert len(decoded_bboxes) == len(mlvl_points)\n        num_levels = len(decoded_bboxes)\n        mlvl_points = [points.repeat(num_imgs, 1) for points in mlvl_points]\n        bbox_targets = []\n        for i in range(num_levels):\n            bbox_target = self.bbox_coder.encode(mlvl_points[i],\n                                                 decoded_bboxes[i])\n            bbox_targets.append(bbox_target)\n\n        return bbox_targets\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, 
error_msgs):\n        \"\"\"Override the method in the parent class to avoid changing the\n        parameters' names.\"\"\"\n        pass\n\n    def _get_points_single(self,\n                           featmap_size,\n                           stride,\n                           dtype,\n                           device,\n                           flatten=False):\n        \"\"\"Get points according to feature map size.\n\n        This function will be deprecated soon.\n        \"\"\"\n\n        warnings.warn(\n            '`_get_points_single` in `VFNetHead` will be '\n            'deprecated soon, we support a multi level point generator now. '\n            'You can get points of a single level feature map '\n            'with `self.fcos_prior_generator.single_level_grid_priors` ')\n\n        h, w = featmap_size\n        x_range = torch.arange(\n            0, w * stride, stride, dtype=dtype, device=device)\n        y_range = torch.arange(\n            0, h * stride, stride, dtype=dtype, device=device)\n        y, x = torch.meshgrid(y_range, x_range)\n        # to be compatible with anchor points in ATSS\n        if self.use_atss:\n            points = torch.stack(\n                (x.reshape(-1), y.reshape(-1)), dim=-1) + \\\n                     stride * self.anchor_center_offset\n        else:\n            points = torch.stack(\n                (x.reshape(-1), y.reshape(-1)), dim=-1) + stride // 2\n        return points\n"
  },
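  {
    "path": "examples/vfnet_ltrb_targets_sketch.py",
    "content": "# NOTE: Illustrative sketch added by a technical editor; it is not part of\n# upstream mmdetection, and the file name is hypothetical. It mirrors, in\n# plain PyTorch, the (x1, y1, x2, y2) -> (l, t, r, b) conversion that\n# ``VFNetHead.transform_bbox_targets`` delegates to ``self.bbox_coder.encode``\n# (FCOS-style distances from a prior point to the four box sides), so the\n# regression target format used by the head can be inspected in isolation.\nimport torch\n\n\ndef xyxy_to_ltrb(points, bboxes):\n    \"\"\"Encode (x1, y1, x2, y2) boxes as (l, t, r, b) distances from points.\n\n    Args:\n        points (Tensor): Shape (N, 2), the (x, y) of each prior point.\n        bboxes (Tensor): Shape (N, 4), boxes in (x1, y1, x2, y2) format.\n\n    Returns:\n        Tensor: Shape (N, 4), distances (left, top, right, bottom).\n    \"\"\"\n    left = points[:, 0] - bboxes[:, 0]\n    top = points[:, 1] - bboxes[:, 1]\n    right = bboxes[:, 2] - points[:, 0]\n    bottom = bboxes[:, 3] - points[:, 1]\n    return torch.stack([left, top, right, bottom], dim=-1)\n\n\nif __name__ == '__main__':\n    # A point at (50, 60) inside a box spanning (40, 40)-(80, 100) gives\n    # distances (10, 20, 30, 40) to its left/top/right/bottom sides.\n    pts = torch.tensor([[50., 60.]])\n    boxes = torch.tensor([[40., 40., 80., 100.]])\n    print(xyxy_to_ltrb(pts, boxes))  # tensor([[10., 20., 30., 40.]])\n"
  },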
  {
    "path": "mmdet/models/dense_heads/yolact_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, ModuleList, force_fp32\n\nfrom mmdet.core import build_sampler, fast_nms, images_to_levels, multi_apply\nfrom mmdet.core.utils import select_single_mlvl\nfrom ..builder import HEADS, build_loss\nfrom .anchor_head import AnchorHead\n\n\n@HEADS.register_module()\nclass YOLACTHead(AnchorHead):\n    \"\"\"YOLACT box head used in https://arxiv.org/abs/1904.02689.\n\n    Note that YOLACT head is a light version of RetinaNet head.\n    Four differences are described as follows:\n\n    1. YOLACT box head has three-times fewer anchors.\n    2. YOLACT box head shares the convs for box and cls branches.\n    3. YOLACT box head uses OHEM instead of Focal loss.\n    4. YOLACT box head predicts a set of mask coefficients for each box.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        anchor_generator (dict): Config dict for anchor generator\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        num_head_convs (int): Number of the conv layers shared by\n            box and cls branches.\n        num_protos (int): Number of the mask coefficients.\n        use_ohem (bool): If true, ``loss_single_OHEM`` will be used for\n            cls loss calculation. If false, ``loss_single`` will be used.\n        conv_cfg (dict): Dictionary to construct and config conv layer.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 anchor_generator=dict(\n                     type='AnchorGenerator',\n                     octave_base_scale=3,\n                     scales_per_octave=1,\n                     ratios=[0.5, 1.0, 2.0],\n                     strides=[8, 16, 32, 64, 128]),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     reduction='none',\n                     loss_weight=1.0),\n                 loss_bbox=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1.5),\n                 num_head_convs=1,\n                 num_protos=32,\n                 use_ohem=True,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier',\n                     distribution='uniform',\n                     bias=0,\n                     layer='Conv2d'),\n                 **kwargs):\n        self.num_head_convs = num_head_convs\n        self.num_protos = num_protos\n        self.use_ohem = use_ohem\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        super(YOLACTHead, self).__init__(\n            num_classes,\n            in_channels,\n            loss_cls=loss_cls,\n            loss_bbox=loss_bbox,\n            anchor_generator=anchor_generator,\n            init_cfg=init_cfg,\n            **kwargs)\n        if self.use_ohem:\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n            self.sampling = False\n\n    
def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.relu = nn.ReLU(inplace=True)\n        self.head_convs = ModuleList()\n        for i in range(self.num_head_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            self.head_convs.append(\n                ConvModule(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.conv_cls = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.cls_out_channels,\n            3,\n            padding=1)\n        self.conv_reg = nn.Conv2d(\n            self.feat_channels, self.num_base_priors * 4, 3, padding=1)\n        self.conv_coeff = nn.Conv2d(\n            self.feat_channels,\n            self.num_base_priors * self.num_protos,\n            3,\n            padding=1)\n\n    def forward_single(self, x):\n        \"\"\"Forward feature of a single scale level.\n\n        Args:\n            x (Tensor): Features of a single scale level.\n\n        Returns:\n            tuple:\n                cls_score (Tensor): Cls scores for a single scale level \\\n                    the channels number is num_anchors * num_classes.\n                bbox_pred (Tensor): Box energies / deltas for a single scale \\\n                    level, the channels number is num_anchors * 4.\n                coeff_pred (Tensor): Mask coefficients for a single scale \\\n                    level, the channels number is num_anchors * num_protos.\n        \"\"\"\n        for head_conv in self.head_convs:\n            x = head_conv(x)\n        cls_score = self.conv_cls(x)\n        bbox_pred = self.conv_reg(x)\n        coeff_pred = self.conv_coeff(x).tanh()\n        return cls_score, bbox_pred, coeff_pred\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"A combination of the func:``AnchorHead.loss`` and\n        func:``SSDHead.loss``.\n\n        When ``self.use_ohem == True``, it functions like ``SSDHead.loss``,\n        otherwise, it follows ``AnchorHead.loss``. Besides, it additionally\n        returns ``sampling_results``.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss. 
Default: None\n\n        Returns:\n            tuple:\n                dict[str, Tensor]: A dictionary of loss components.\n                List[:obj:``SamplingResult``]: Sampler results for each image.\n        \"\"\"\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        assert len(featmap_sizes) == self.prior_generator.num_levels\n\n        device = cls_scores[0].device\n\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels,\n            unmap_outputs=not self.use_ohem,\n            return_sampling_results=True)\n        if cls_reg_targets is None:\n            return None\n        (labels_list, label_weights_list, bbox_targets_list, bbox_weights_list,\n         num_total_pos, num_total_neg, sampling_results) = cls_reg_targets\n\n        if self.use_ohem:\n            num_images = len(img_metas)\n            all_cls_scores = torch.cat([\n                s.permute(0, 2, 3, 1).reshape(\n                    num_images, -1, self.cls_out_channels) for s in cls_scores\n            ], 1)\n            all_labels = torch.cat(labels_list, -1).view(num_images, -1)\n            all_label_weights = torch.cat(label_weights_list,\n                                          -1).view(num_images, -1)\n            all_bbox_preds = torch.cat([\n                b.permute(0, 2, 3, 1).reshape(num_images, -1, 4)\n                for b in bbox_preds\n            ], -2)\n            all_bbox_targets = torch.cat(bbox_targets_list,\n                                         -2).view(num_images, -1, 4)\n            all_bbox_weights = torch.cat(bbox_weights_list,\n                                         -2).view(num_images, -1, 4)\n\n            # concat all level anchors to a single tensor\n            all_anchors = []\n            for i in range(num_images):\n                all_anchors.append(torch.cat(anchor_list[i]))\n\n            # check NaN and Inf\n            assert torch.isfinite(all_cls_scores).all().item(), \\\n                'classification scores become infinite or NaN!'\n            assert torch.isfinite(all_bbox_preds).all().item(), \\\n                'bbox predications become infinite or NaN!'\n\n            losses_cls, losses_bbox = multi_apply(\n                self.loss_single_OHEM,\n                all_cls_scores,\n                all_bbox_preds,\n                all_anchors,\n                all_labels,\n                all_label_weights,\n                all_bbox_targets,\n                all_bbox_weights,\n                num_total_samples=num_total_pos)\n        else:\n            num_total_samples = (\n                num_total_pos +\n                num_total_neg if self.sampling else num_total_pos)\n\n            # anchor number of multi levels\n            num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n            # concat all level anchors and flags to a single tensor\n            concat_anchor_list = []\n            for i in range(len(anchor_list)):\n                concat_anchor_list.append(torch.cat(anchor_list[i]))\n            all_anchor_list = images_to_levels(concat_anchor_list,\n                                     
          num_level_anchors)\n            losses_cls, losses_bbox = multi_apply(\n                self.loss_single,\n                cls_scores,\n                bbox_preds,\n                all_anchor_list,\n                labels_list,\n                label_weights_list,\n                bbox_targets_list,\n                bbox_weights_list,\n                num_total_samples=num_total_samples)\n\n        return dict(\n            loss_cls=losses_cls, loss_bbox=losses_bbox), sampling_results\n\n    def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,\n                         label_weights, bbox_targets, bbox_weights,\n                         num_total_samples):\n        \"\"\"\"See func:``SSDHead.loss``.\"\"\"\n        loss_cls_all = self.loss_cls(cls_score, labels, label_weights)\n\n        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n        pos_inds = ((labels >= 0) & (labels < self.num_classes)).nonzero(\n            as_tuple=False).reshape(-1)\n        neg_inds = (labels == self.num_classes).nonzero(\n            as_tuple=False).view(-1)\n\n        num_pos_samples = pos_inds.size(0)\n        if num_pos_samples == 0:\n            num_neg_samples = neg_inds.size(0)\n        else:\n            num_neg_samples = self.train_cfg.neg_pos_ratio * num_pos_samples\n            if num_neg_samples > neg_inds.size(0):\n                num_neg_samples = neg_inds.size(0)\n        topk_loss_cls_neg, _ = loss_cls_all[neg_inds].topk(num_neg_samples)\n        loss_cls_pos = loss_cls_all[pos_inds].sum()\n        loss_cls_neg = topk_loss_cls_neg.sum()\n        loss_cls = (loss_cls_pos + loss_cls_neg) / num_total_samples\n        if self.reg_decoded_bbox:\n            # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n            # is applied directly on the decoded bounding boxes, it\n            # decodes the already encoded coordinates to absolute format.\n            bbox_pred = self.bbox_coder.decode(anchors, bbox_pred)\n        loss_bbox = self.loss_bbox(\n            bbox_pred,\n            bbox_targets,\n            bbox_weights,\n            avg_factor=num_total_samples)\n        return loss_cls[None], loss_bbox\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'coeff_preds'))\n    def get_bboxes(self,\n                   cls_scores,\n                   bbox_preds,\n                   coeff_preds,\n                   img_metas,\n                   cfg=None,\n                   rescale=False):\n        \"\"\"\"Similar to func:``AnchorHead.get_bboxes``, but additionally\n        processes coeff_preds.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                with shape (N, num_anchors * num_classes, H, W)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (N, num_anchors * 4, H, W)\n            coeff_preds (list[Tensor]): Mask coefficients for each scale\n                level with shape (N, num_anchors * num_protos, H, W)\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            cfg (mmcv.Config | None): Test / postprocessing configuration,\n                if None, test_cfg would be used\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n\n        Returns:\n            list[tuple[Tensor, Tensor, Tensor]]: Each item in result_list is\n                a 3-tuple. 
The first item is an (n, 5) tensor, where the\n                first 4 columns are bounding box positions\n                (tl_x, tl_y, br_x, br_y) and the 5-th column is a score\n                between 0 and 1. The second item is an (n,) tensor where each\n                item is the predicted class label of the corresponding box.\n                The third item is an (n, num_protos) tensor where each item\n                is the predicted mask coefficients of instance inside the\n                corresponding box.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds)\n        num_levels = len(cls_scores)\n\n        device = cls_scores[0].device\n        featmap_sizes = [cls_scores[i].shape[-2:] for i in range(num_levels)]\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n\n        det_bboxes = []\n        det_labels = []\n        det_coeffs = []\n        for img_id in range(len(img_metas)):\n            cls_score_list = select_single_mlvl(cls_scores, img_id)\n            bbox_pred_list = select_single_mlvl(bbox_preds, img_id)\n            coeff_pred_list = select_single_mlvl(coeff_preds, img_id)\n            img_shape = img_metas[img_id]['img_shape']\n            scale_factor = img_metas[img_id]['scale_factor']\n            bbox_res = self._get_bboxes_single(cls_score_list, bbox_pred_list,\n                                               coeff_pred_list, mlvl_anchors,\n                                               img_shape, scale_factor, cfg,\n                                               rescale)\n            det_bboxes.append(bbox_res[0])\n            det_labels.append(bbox_res[1])\n            det_coeffs.append(bbox_res[2])\n        return det_bboxes, det_labels, det_coeffs\n\n    def _get_bboxes_single(self,\n                           cls_score_list,\n                           bbox_pred_list,\n                           coeff_preds_list,\n                           mlvl_anchors,\n                           img_shape,\n                           scale_factor,\n                           cfg,\n                           rescale=False):\n        \"\"\"\"Similar to func:``AnchorHead._get_bboxes_single``, but additionally\n        processes coeff_preds_list and uses fast NMS instead of traditional\n        NMS.\n\n        Args:\n            cls_score_list (list[Tensor]): Box scores for a single scale level\n                Has shape (num_anchors * num_classes, H, W).\n            bbox_pred_list (list[Tensor]): Box energies / deltas for a single\n                scale level with shape (num_anchors * 4, H, W).\n            coeff_preds_list (list[Tensor]): Mask coefficients for a single\n                scale level with shape (num_anchors * num_protos, H, W).\n            mlvl_anchors (list[Tensor]): Box reference for a single scale level\n                with shape (num_total_anchors, 4).\n            img_shape (tuple[int]): Shape of the input image,\n                (height, width, 3).\n            scale_factor (ndarray): Scale factor of the image arange as\n                (w_scale, h_scale, w_scale, h_scale).\n            cfg (mmcv.Config): Test / postprocessing configuration,\n                if None, test_cfg would be used.\n            rescale (bool): If True, return boxes in original image space.\n\n        Returns:\n            tuple[Tensor, Tensor, Tensor]: The first item is an (n, 5) tensor,\n                where the first 4 columns are bounding box positions\n                (tl_x, tl_y, br_x, br_y) and the 
5-th column is a score between\n                0 and 1. The second item is an (n,) tensor where each item is\n                the predicted class label of the corresponding box. The third\n                item is an (n, num_protos) tensor where each item is the\n                predicted mask coefficients of instance inside the\n                corresponding box.\n        \"\"\"\n        cfg = self.test_cfg if cfg is None else cfg\n        assert len(cls_score_list) == len(bbox_pred_list) == len(mlvl_anchors)\n        nms_pre = cfg.get('nms_pre', -1)\n        mlvl_bboxes = []\n        mlvl_scores = []\n        mlvl_coeffs = []\n        for cls_score, bbox_pred, coeff_pred, anchors in \\\n                zip(cls_score_list, bbox_pred_list,\n                    coeff_preds_list, mlvl_anchors):\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\n            cls_score = cls_score.permute(1, 2,\n                                          0).reshape(-1, self.cls_out_channels)\n            if self.use_sigmoid_cls:\n                scores = cls_score.sigmoid()\n            else:\n                scores = cls_score.softmax(-1)\n            bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, 4)\n            coeff_pred = coeff_pred.permute(1, 2,\n                                            0).reshape(-1, self.num_protos)\n\n            if 0 < nms_pre < scores.shape[0]:\n                # Get maximum scores for foreground classes.\n                if self.use_sigmoid_cls:\n                    max_scores, _ = scores.max(dim=1)\n                else:\n                    # remind that we set FG labels to [0, num_class-1]\n                    # since mmdet v2.0\n                    # BG cat_id: num_class\n                    max_scores, _ = scores[:, :-1].max(dim=1)\n                _, topk_inds = max_scores.topk(nms_pre)\n                anchors = anchors[topk_inds, :]\n                bbox_pred = bbox_pred[topk_inds, :]\n                scores = scores[topk_inds, :]\n                coeff_pred = coeff_pred[topk_inds, :]\n            bboxes = self.bbox_coder.decode(\n                anchors, bbox_pred, max_shape=img_shape)\n            mlvl_bboxes.append(bboxes)\n            mlvl_scores.append(scores)\n            mlvl_coeffs.append(coeff_pred)\n        mlvl_bboxes = torch.cat(mlvl_bboxes)\n        if rescale:\n            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\n        mlvl_scores = torch.cat(mlvl_scores)\n        mlvl_coeffs = torch.cat(mlvl_coeffs)\n        if self.use_sigmoid_cls:\n            # Add a dummy background class to the backend when using sigmoid\n            # remind that we set FG labels to [0, num_class-1] since mmdet v2.0\n            # BG cat_id: num_class\n            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\n            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\n        det_bboxes, det_labels, det_coeffs = fast_nms(mlvl_bboxes, mlvl_scores,\n                                                      mlvl_coeffs,\n                                                      cfg.score_thr,\n                                                      cfg.iou_thr, cfg.top_k,\n                                                      cfg.max_per_img)\n        return det_bboxes, det_labels, det_coeffs\n\n\n@HEADS.register_module()\nclass YOLACTSegmHead(BaseModule):\n    \"\"\"YOLACT segmentation head used in https://arxiv.org/abs/1904.02689.\n\n    Apply a semantic segmentation loss on feature space using layers that are\n    only evaluated during 
training to increase performance with no speed\n    penalty.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        num_classes (int): Number of categories excluding the background\n            category.\n        loss_segm (dict): Config of semantic segmentation loss.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels=256,\n                 loss_segm=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 init_cfg=dict(\n                     type='Xavier',\n                     distribution='uniform',\n                     override=dict(name='segm_conv'))):\n        super(YOLACTSegmHead, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.loss_segm = build_loss(loss_segm)\n        self._init_layers()\n        self.fp16_enabled = False\n\n    def _init_layers(self):\n        \"\"\"Initialize layers of the head.\"\"\"\n        self.segm_conv = nn.Conv2d(\n            self.in_channels, self.num_classes, kernel_size=1)\n\n    def forward(self, x):\n        \"\"\"Forward feature from the upstream network.\n\n        Args:\n            x (Tensor): Feature from the upstream network, which is\n                a 4D-tensor.\n\n        Returns:\n            Tensor: Predicted semantic segmentation map with shape\n                (N, num_classes, H, W).\n        \"\"\"\n        return self.segm_conv(x)\n\n    @force_fp32(apply_to=('segm_pred', ))\n    def loss(self, segm_pred, gt_masks, gt_labels):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            segm_pred (list[Tensor]): Predicted semantic segmentation map\n                with shape (N, num_classes, H, W).\n            gt_masks (list[Tensor]): Ground truth masks for each image with\n                the same shape of the input image.\n            gt_labels (list[Tensor]): Class indices corresponding to each box.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        loss_segm = []\n        num_imgs, num_classes, mask_h, mask_w = segm_pred.size()\n        for idx in range(num_imgs):\n            cur_segm_pred = segm_pred[idx]\n            cur_gt_masks = gt_masks[idx].float()\n            cur_gt_labels = gt_labels[idx]\n            segm_targets = self.get_targets(cur_segm_pred, cur_gt_masks,\n                                            cur_gt_labels)\n            if segm_targets is None:\n                loss = self.loss_segm(cur_segm_pred,\n                                      torch.zeros_like(cur_segm_pred),\n                                      torch.zeros_like(cur_segm_pred))\n            else:\n                loss = self.loss_segm(\n                    cur_segm_pred,\n                    segm_targets,\n                    avg_factor=num_imgs * mask_h * mask_w)\n            loss_segm.append(loss)\n        return dict(loss_segm=loss_segm)\n\n    def get_targets(self, segm_pred, gt_masks, gt_labels):\n        \"\"\"Compute semantic segmentation targets for each image.\n\n        Args:\n            segm_pred (Tensor): Predicted semantic segmentation map\n                with shape (num_classes, H, W).\n            gt_masks (Tensor): Ground truth masks for each image with\n                the same shape of the input image.\n            gt_labels 
(Tensor): Class indices corresponding to each box.\n\n        Returns:\n            Tensor: Semantic segmentation targets with shape\n                (num_classes, H, W).\n        \"\"\"\n        if gt_masks.size(0) == 0:\n            return None\n        num_classes, mask_h, mask_w = segm_pred.size()\n        with torch.no_grad():\n            downsampled_masks = F.interpolate(\n                gt_masks.unsqueeze(0), (mask_h, mask_w),\n                mode='bilinear',\n                align_corners=False).squeeze(0)\n            downsampled_masks = downsampled_masks.gt(0.5).float()\n            segm_targets = torch.zeros_like(segm_pred, requires_grad=False)\n            for obj_idx in range(downsampled_masks.size(0)):\n                segm_targets[gt_labels[obj_idx] - 1] = torch.max(\n                    segm_targets[gt_labels[obj_idx] - 1],\n                    downsampled_masks[obj_idx])\n            return segm_targets\n\n    def simple_test(self, feats, img_metas, rescale=False):\n        \"\"\"Test function without test-time augmentation.\"\"\"\n        raise NotImplementedError(\n            'simple_test of YOLACTSegmHead is not implemented '\n            'because this head is only evaluated during training')\n\n\n@HEADS.register_module()\nclass YOLACTProtonet(BaseModule):\n    \"\"\"YOLACT mask head used in https://arxiv.org/abs/1904.02689.\n\n    This head outputs the mask prototypes for YOLACT.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        proto_channels (tuple[int]): Output channels of protonet convs.\n        proto_kernel_sizes (tuple[int]): Kernel sizes of protonet convs.\n        include_last_relu (Bool): If keep the last relu of protonet.\n        num_protos (int): Number of prototypes.\n        num_classes (int): Number of categories excluding the background\n            category.\n        loss_mask_weight (float): Reweight the mask loss by this factor.\n        max_masks_to_train (int): Maximum number of masks to train for\n            each image.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels=256,\n                 proto_channels=(256, 256, 256, None, 256, 32),\n                 proto_kernel_sizes=(3, 3, 3, -2, 3, 1),\n                 include_last_relu=True,\n                 num_protos=32,\n                 loss_mask_weight=1.0,\n                 max_masks_to_train=100,\n                 init_cfg=dict(\n                     type='Xavier',\n                     distribution='uniform',\n                     override=dict(name='protonet'))):\n        super(YOLACTProtonet, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.proto_channels = proto_channels\n        self.proto_kernel_sizes = proto_kernel_sizes\n        self.include_last_relu = include_last_relu\n        self.protonet = self._init_layers()\n\n        self.loss_mask_weight = loss_mask_weight\n        self.num_protos = num_protos\n        self.num_classes = num_classes\n        self.max_masks_to_train = max_masks_to_train\n        self.fp16_enabled = False\n\n    def _init_layers(self):\n        \"\"\"A helper function to take a config setting and turn it into a\n        network.\"\"\"\n        # Possible patterns:\n        # ( 256, 3) -> conv\n        # ( 256,-2) -> deconv\n        # (None,-2) -> bilinear interpolate\n        in_channels = self.in_channels\n        protonets = ModuleList()\n        for 
num_channels, kernel_size in zip(self.proto_channels,\n                                             self.proto_kernel_sizes):\n            if kernel_size > 0:\n                layer = nn.Conv2d(\n                    in_channels,\n                    num_channels,\n                    kernel_size,\n                    padding=kernel_size // 2)\n            else:\n                if num_channels is None:\n                    layer = InterpolateModule(\n                        scale_factor=-kernel_size,\n                        mode='bilinear',\n                        align_corners=False)\n                else:\n                    layer = nn.ConvTranspose2d(\n                        in_channels,\n                        num_channels,\n                        -kernel_size,\n                        padding=kernel_size // 2)\n            protonets.append(layer)\n            protonets.append(nn.ReLU(inplace=True))\n            in_channels = num_channels if num_channels is not None \\\n                else in_channels\n        if not self.include_last_relu:\n            protonets = protonets[:-1]\n        return nn.Sequential(*protonets)\n\n    def forward_dummy(self, x):\n        prototypes = self.protonet(x)\n        return prototypes\n\n    def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=None):\n        \"\"\"Forward feature from the upstream network to get prototypes and\n        linearly combine the prototypes, using masks coefficients, into\n        instance masks. Finally, crop the instance masks with given bboxes.\n\n        Args:\n            x (Tensor): Feature from the upstream network, which is\n                a 4D-tensor.\n            coeff_pred (list[Tensor]): Mask coefficients for each scale\n                level with shape (N, num_anchors * num_protos, H, W).\n            bboxes (list[Tensor]): Box used for cropping with shape\n                (N, num_anchors * 4, H, W). During training, they are\n                ground truth boxes. 
During testing, they are predicted\n                boxes.\n            img_meta (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            sampling_results (List[:obj:``SamplingResult``]): Sampler results\n                for each image.\n\n        Returns:\n            list[Tensor]: Predicted instance segmentation masks.\n        \"\"\"\n        prototypes = self.protonet(x)\n        prototypes = prototypes.permute(0, 2, 3, 1).contiguous()\n\n        num_imgs = x.size(0)\n\n        # The reason for not using self.training is that\n        # val workflow will have a dimension mismatch error.\n        # Note that this writing method is very tricky.\n        # Fix https://github.com/open-mmlab/mmdetection/issues/5978\n        is_train_or_val_workflow = (coeff_pred[0].dim() == 4)\n\n        # Train or val workflow\n        if is_train_or_val_workflow:\n            coeff_pred_list = []\n            for coeff_pred_per_level in coeff_pred:\n                coeff_pred_per_level = \\\n                    coeff_pred_per_level.permute(\n                        0, 2, 3, 1).reshape(num_imgs, -1, self.num_protos)\n                coeff_pred_list.append(coeff_pred_per_level)\n            coeff_pred = torch.cat(coeff_pred_list, dim=1)\n\n        mask_pred_list = []\n        for idx in range(num_imgs):\n            cur_prototypes = prototypes[idx]\n            cur_coeff_pred = coeff_pred[idx]\n            cur_bboxes = bboxes[idx]\n            cur_img_meta = img_meta[idx]\n\n            # Testing state\n            if not is_train_or_val_workflow:\n                bboxes_for_cropping = cur_bboxes\n            else:\n                cur_sampling_results = sampling_results[idx]\n                pos_assigned_gt_inds = \\\n                    cur_sampling_results.pos_assigned_gt_inds\n                bboxes_for_cropping = cur_bboxes[pos_assigned_gt_inds].clone()\n                pos_inds = cur_sampling_results.pos_inds\n                cur_coeff_pred = cur_coeff_pred[pos_inds]\n\n            # Linearly combine the prototypes with the mask coefficients\n            mask_pred = cur_prototypes @ cur_coeff_pred.t()\n            mask_pred = torch.sigmoid(mask_pred)\n\n            h, w = cur_img_meta['img_shape'][:2]\n            bboxes_for_cropping[:, 0] /= w\n            bboxes_for_cropping[:, 1] /= h\n            bboxes_for_cropping[:, 2] /= w\n            bboxes_for_cropping[:, 3] /= h\n\n            mask_pred = self.crop(mask_pred, bboxes_for_cropping)\n            mask_pred = mask_pred.permute(2, 0, 1).contiguous()\n            mask_pred_list.append(mask_pred)\n        return mask_pred_list\n\n    @force_fp32(apply_to=('mask_pred', ))\n    def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_results):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            mask_pred (list[Tensor]): Predicted prototypes with shape\n                (num_classes, H, W).\n            gt_masks (list[Tensor]): Ground truth masks for each image with\n                the same shape of the input image.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            img_meta (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            sampling_results (List[:obj:``SamplingResult``]): Sampler results\n                for each image.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of 
loss components.\n        \"\"\"\n        loss_mask = []\n        num_imgs = len(mask_pred)\n        total_pos = 0\n        for idx in range(num_imgs):\n            cur_mask_pred = mask_pred[idx]\n            cur_gt_masks = gt_masks[idx].float()\n            cur_gt_bboxes = gt_bboxes[idx]\n            cur_img_meta = img_meta[idx]\n            cur_sampling_results = sampling_results[idx]\n\n            pos_assigned_gt_inds = cur_sampling_results.pos_assigned_gt_inds\n            num_pos = pos_assigned_gt_inds.size(0)\n            # Since we're producing (near) full image masks,\n            # it'd take too much vram to backprop on every single mask.\n            # Thus we select only a subset.\n            if num_pos > self.max_masks_to_train:\n                perm = torch.randperm(num_pos)\n                select = perm[:self.max_masks_to_train]\n                cur_mask_pred = cur_mask_pred[select]\n                pos_assigned_gt_inds = pos_assigned_gt_inds[select]\n                num_pos = self.max_masks_to_train\n            total_pos += num_pos\n\n            gt_bboxes_for_reweight = cur_gt_bboxes[pos_assigned_gt_inds]\n\n            mask_targets = self.get_targets(cur_mask_pred, cur_gt_masks,\n                                            pos_assigned_gt_inds)\n            if num_pos == 0:\n                loss = cur_mask_pred.sum() * 0.\n            elif mask_targets is None:\n                loss = F.binary_cross_entropy(cur_mask_pred,\n                                              torch.zeros_like(cur_mask_pred),\n                                              torch.zeros_like(cur_mask_pred))\n            else:\n                cur_mask_pred = torch.clamp(cur_mask_pred, 0, 1)\n                loss = F.binary_cross_entropy(\n                    cur_mask_pred, mask_targets,\n                    reduction='none') * self.loss_mask_weight\n\n                h, w = cur_img_meta['img_shape'][:2]\n                gt_bboxes_width = (gt_bboxes_for_reweight[:, 2] -\n                                   gt_bboxes_for_reweight[:, 0]) / w\n                gt_bboxes_height = (gt_bboxes_for_reweight[:, 3] -\n                                    gt_bboxes_for_reweight[:, 1]) / h\n                loss = loss.mean(dim=(1,\n                                      2)) / gt_bboxes_width / gt_bboxes_height\n                loss = torch.sum(loss)\n            loss_mask.append(loss)\n\n        if total_pos == 0:\n            total_pos += 1  # avoid nan\n        loss_mask = [x / total_pos for x in loss_mask]\n\n        return dict(loss_mask=loss_mask)\n\n    def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):\n        \"\"\"Compute instance segmentation targets for each image.\n\n        Args:\n            mask_pred (Tensor): Predicted prototypes with shape\n                (num_classes, H, W).\n            gt_masks (Tensor): Ground truth masks for each image with\n                the same shape of the input image.\n            pos_assigned_gt_inds (Tensor): GT indices of the corresponding\n                positive samples.\n        Returns:\n            Tensor: Instance segmentation targets with shape\n                (num_instances, H, W).\n        \"\"\"\n        if gt_masks.size(0) == 0:\n            return None\n        mask_h, mask_w = mask_pred.shape[-2:]\n        gt_masks = F.interpolate(\n            gt_masks.unsqueeze(0), (mask_h, mask_w),\n            mode='bilinear',\n            align_corners=False).squeeze(0)\n        gt_masks = gt_masks.gt(0.5).float()\n        mask_targets = 
gt_masks[pos_assigned_gt_inds]\n        return mask_targets\n\n    def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):\n        \"\"\"Resize, binarize, and format the instance mask predictions.\n\n        Args:\n            mask_pred (Tensor): shape (N, H, W).\n            label_pred (Tensor): shape (N, ).\n            img_meta (dict): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If rescale is False, then returned masks will\n                fit the scale of imgs[0].\n        Returns:\n            list[ndarray]: Mask predictions grouped by their predicted classes.\n        \"\"\"\n        ori_shape = img_meta['ori_shape']\n        scale_factor = img_meta['scale_factor']\n        if rescale:\n            img_h, img_w = ori_shape[:2]\n        else:\n            img_h = np.round(ori_shape[0] * scale_factor[1]).astype(np.int32)\n            img_w = np.round(ori_shape[1] * scale_factor[0]).astype(np.int32)\n\n        cls_segms = [[] for _ in range(self.num_classes)]\n        if mask_pred.size(0) == 0:\n            return cls_segms\n\n        mask_pred = F.interpolate(\n            mask_pred.unsqueeze(0), (img_h, img_w),\n            mode='bilinear',\n            align_corners=False).squeeze(0) > 0.5\n        mask_pred = mask_pred.cpu().numpy().astype(np.uint8)\n\n        for m, l in zip(mask_pred, label_pred):\n            cls_segms[l].append(m)\n        return cls_segms\n\n    def crop(self, masks, boxes, padding=1):\n        \"\"\"Crop predicted masks by zeroing out everything not in the predicted\n        bbox.\n\n        Args:\n            masks (Tensor): shape [H, W, N].\n            boxes (Tensor): bbox coords in relative point form with\n                shape [N, 4].\n\n        Return:\n            Tensor: The cropped masks.\n        \"\"\"\n        h, w, n = masks.size()\n        x1, x2 = self.sanitize_coordinates(\n            boxes[:, 0], boxes[:, 2], w, padding, cast=False)\n        y1, y2 = self.sanitize_coordinates(\n            boxes[:, 1], boxes[:, 3], h, padding, cast=False)\n\n        rows = torch.arange(\n            w, device=masks.device, dtype=x1.dtype).view(1, -1,\n                                                         1).expand(h, w, n)\n        cols = torch.arange(\n            h, device=masks.device, dtype=x1.dtype).view(-1, 1,\n                                                         1).expand(h, w, n)\n\n        masks_left = rows >= x1.view(1, 1, -1)\n        masks_right = rows < x2.view(1, 1, -1)\n        masks_up = cols >= y1.view(1, 1, -1)\n        masks_down = cols < y2.view(1, 1, -1)\n\n        crop_mask = masks_left * masks_right * masks_up * masks_down\n\n        return masks * crop_mask.float()\n\n    def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):\n        \"\"\"Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0,\n        and x2 <= image_size. 
Also converts from relative to absolute\n        coordinates and casts the results to long tensors.\n\n        Warning: this does things in-place behind the scenes so\n        copy if necessary.\n\n        Args:\n            x1 (Tensor): shape (N, ).\n            x2 (Tensor): shape (N, ).\n            img_size (int): Size of the input image.\n            padding (int): x1 >= padding, x2 <= image_size-padding.\n            cast (bool): If cast is false, the result won't be cast to longs.\n\n        Returns:\n            tuple:\n                x1 (Tensor): Sanitized x1.\n                x2 (Tensor): Sanitized x2.\n        \"\"\"\n        x1 = x1 * img_size\n        x2 = x2 * img_size\n        if cast:\n            x1 = x1.long()\n            x2 = x2.long()\n        x1 = torch.min(x1, x2)\n        x2 = torch.max(x1, x2)\n        x1 = torch.clamp(x1 - padding, min=0)\n        x2 = torch.clamp(x2 + padding, max=img_size)\n        return x1, x2\n\n    def simple_test(self,\n                    feats,\n                    det_bboxes,\n                    det_labels,\n                    det_coeffs,\n                    img_metas,\n                    rescale=False):\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            feats (tuple[torch.Tensor]): Multi-level features from the\n                upstream network, each is a 4D-tensor.\n            det_bboxes (list[Tensor]): BBox results of each image. Each\n                element is an (n, 5) tensor, where the first 4 columns are\n                (tl_x, tl_y, br_x, br_y) and the 5-th column is a score\n                between 0 and 1.\n            det_labels (list[Tensor]): Predicted class labels of each image.\n                Each element is an (n, ) tensor where each item is the class\n                label of the corresponding box.\n            det_coeffs (list[Tensor]): Mask coefficients of each image. Each\n                element is an (n, m) tensor, where m is the length of the\n                coefficient vector.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[list]: Encoded masks. The c-th item in the outer list\n                corresponds to the c-th class. 
Given the c-th outer list, the\n                i-th item in that inner list is the mask for the i-th box with\n                class label c.\n        \"\"\"\n        num_imgs = len(img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            segm_results = [[[] for _ in range(self.num_classes)]\n                            for _ in range(num_imgs)]\n        else:\n            # if det_bboxes is rescaled to the original image size, we need to\n            # rescale it back to the testing scale to obtain RoIs.\n            if rescale and not isinstance(scale_factors[0], float):\n                scale_factors = [\n                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                    for scale_factor in scale_factors\n                ]\n            _bboxes = [\n                det_bboxes[i][:, :4] *\n                scale_factors[i] if rescale else det_bboxes[i][:, :4]\n                for i in range(len(det_bboxes))\n            ]\n            mask_preds = self.forward(feats[0], det_coeffs, _bboxes, img_metas)\n            # apply mask post-processing to each image individually\n            segm_results = []\n            for i in range(num_imgs):\n                if det_bboxes[i].shape[0] == 0:\n                    segm_results.append([[] for _ in range(self.num_classes)])\n                else:\n                    segm_result = self.get_seg_masks(mask_preds[i],\n                                                     det_labels[i],\n                                                     img_metas[i], rescale)\n                    segm_results.append(segm_result)\n        return segm_results\n\n\nclass InterpolateModule(BaseModule):\n    \"\"\"This is a module version of F.interpolate.\n\n    Any arguments you give it just get passed along for the ride.\n    \"\"\"\n\n    def __init__(self, *args, init_cfg=None, **kwargs):\n        super().__init__(init_cfg)\n\n        self.args = args\n        self.kwargs = kwargs\n\n    def forward(self, x):\n        \"\"\"Forward features from the upstream network.\"\"\"\n        return F.interpolate(x, *self.args, **self.kwargs)\n"
  },
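  {
    "path": "examples/yolact_head_forward_sketch.py",
    "content": "# NOTE: Illustrative sketch added by a technical editor; it is not part of\n# upstream mmdetection, and the file name and dummy tensor shapes here are\n# hypothetical. It only exercises pieces defined in\n# mmdet/models/dense_heads/yolact_head.py: constructing ``YOLACTHead`` with\n# its default anchor and loss configs, then running ``forward_single`` on one\n# feature level to inspect the three per-anchor outputs (class scores, box\n# deltas, mask coefficients) described in that head's docstrings.\nimport torch\n\nfrom mmdet.models.dense_heads.yolact_head import YOLACTHead\n\nif __name__ == '__main__':\n    # 80 COCO classes, 256-channel FPN features, default 3 anchors per point.\n    head = YOLACTHead(num_classes=80, in_channels=256)\n\n    # Dummy feature map for a single scale level (batch of 2).\n    feat = torch.rand(2, 256, 69, 69)\n    cls_score, bbox_pred, coeff_pred = head.forward_single(feat)\n\n    # Channel layout per spatial location:\n    #   cls_score : num_base_priors * cls_out_channels\n    #   bbox_pred : num_base_priors * 4\n    #   coeff_pred: num_base_priors * num_protos, tanh-activated in [-1, 1]\n    print(cls_score.shape, bbox_pred.shape, coeff_pred.shape)\n"
  },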
  {
    "path": "mmdet/models/dense_heads/yolo_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm,\n                      normal_init)\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (build_assigner, build_bbox_coder,\n                        build_prior_generator, build_sampler, images_to_levels,\n                        multi_apply, multiclass_nms)\nfrom ..builder import HEADS, build_loss\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\n@HEADS.register_module()\nclass YOLOV3Head(BaseDenseHead, BBoxTestMixin):\n    \"\"\"YOLOV3Head Paper link: https://arxiv.org/abs/1804.02767.\n\n    Args:\n        num_classes (int): The number of object classes (w/o background)\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (List[int]): The number of output channels per scale\n            before the final 1x1 layer. Default: (1024, 512, 256).\n        anchor_generator (dict): Config dict for anchor generator\n        bbox_coder (dict): Config of bounding box coder.\n        featmap_strides (List[int]): The stride of each scale.\n            Should be in descending order. Default: (32, 16, 8).\n        one_hot_smoother (float): Set a non-zero value to enable label-smooth\n            Default: 0.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        loss_cls (dict): Config of classification loss.\n        loss_conf (dict): Config of confidence loss.\n        loss_xy (dict): Config of xy coordinate loss.\n        loss_wh (dict): Config of wh coordinate loss.\n        train_cfg (dict): Training config of YOLOV3 head. Default: None.\n        test_cfg (dict): Testing config of YOLOV3 head. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 out_channels=(1024, 512, 256),\n                 anchor_generator=dict(\n                     type='YOLOAnchorGenerator',\n                     base_sizes=[[(116, 90), (156, 198), (373, 326)],\n                                 [(30, 61), (62, 45), (59, 119)],\n                                 [(10, 13), (16, 30), (33, 23)]],\n                     strides=[32, 16, 8]),\n                 bbox_coder=dict(type='YOLOBBoxCoder'),\n                 featmap_strides=[32, 16, 8],\n                 one_hot_smoother=0.,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_conf=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_xy=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_wh=dict(type='MSELoss', loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(\n                     type='Normal', std=0.01,\n                     override=dict(name='convs_pred'))):\n        super(YOLOV3Head, self).__init__(init_cfg)\n        # Check params\n        assert (len(in_channels) == len(out_channels) == len(featmap_strides))\n\n        self.num_classes = num_classes\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.featmap_strides = featmap_strides\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            if hasattr(self.train_cfg, 'sampler'):\n                sampler_cfg = self.train_cfg.sampler\n            else:\n                sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n        self.fp16_enabled = False\n\n        self.one_hot_smoother = one_hot_smoother\n\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n\n        self.prior_generator = build_prior_generator(anchor_generator)\n\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_conf = build_loss(loss_conf)\n        self.loss_xy = build_loss(loss_xy)\n        self.loss_wh = build_loss(loss_wh)\n\n        self.num_base_priors = self.prior_generator.num_base_priors[0]\n        assert len(\n            self.prior_generator.num_base_priors) == len(featmap_strides)\n        self._init_layers()\n\n    @property\n    def anchor_generator(self):\n\n        warnings.warn('DeprecationWarning: `anchor_generator` is deprecated, '\n                      'please use \"prior_generator\" instead')\n        return self.prior_generator\n\n    @property\n    def num_anchors(self):\n        \"\"\"\n        Returns:\n            int: Number of anchors on each point of feature map.\n        \"\"\"\n        warnings.warn('DeprecationWarning: `num_anchors` is 
deprecated, '\n                      'please use \"num_base_priors\" instead')\n        return self.num_base_priors\n\n    @property\n    def num_levels(self):\n        return len(self.featmap_strides)\n\n    @property\n    def num_attrib(self):\n        \"\"\"int: number of attributes in pred_map, bboxes (4) +\n        objectness (1) + num_classes\"\"\"\n\n        return 5 + self.num_classes\n\n    def _init_layers(self):\n        self.convs_bridge = nn.ModuleList()\n        self.convs_pred = nn.ModuleList()\n        for i in range(self.num_levels):\n            conv_bridge = ConvModule(\n                self.in_channels[i],\n                self.out_channels[i],\n                3,\n                padding=1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg,\n                act_cfg=self.act_cfg)\n            conv_pred = nn.Conv2d(self.out_channels[i],\n                                  self.num_base_priors * self.num_attrib, 1)\n\n            self.convs_bridge.append(conv_bridge)\n            self.convs_pred.append(conv_pred)\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n        # Use prior in model initialization to improve stability\n        for conv_pred, stride in zip(self.convs_pred, self.featmap_strides):\n            bias = conv_pred.bias.reshape(self.num_base_priors, -1)\n            # init objectness with prior of 8 objects per feature map\n            # refer to https://github.com/ultralytics/yolov3\n            nn.init.constant_(bias.data[:, 4],\n                              bias_init_with_prob(8 / (608 / stride)**2))\n            nn.init.constant_(bias.data[:, 5:], bias_init_with_prob(0.01))\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n\n        Returns:\n            tuple[Tensor]: A tuple of multi-level predication map, each is a\n                4D-tensor of shape (batch_size, 5+num_classes, height, width).\n        \"\"\"\n\n        assert len(feats) == self.num_levels\n        pred_maps = []\n        for i in range(self.num_levels):\n            x = feats[i]\n            x = self.convs_bridge[i](x)\n            pred_map = self.convs_pred[i](x)\n            pred_maps.append(pred_map)\n\n        return tuple(pred_maps),\n\n    @force_fp32(apply_to=('pred_maps', ))\n    def get_bboxes(self,\n                   pred_maps,\n                   img_metas,\n                   cfg=None,\n                   rescale=False,\n                   with_nms=True):\n        \"\"\"Transform network output for a batch into bbox predictions. It has\n        been accelerated since PR #5991.\n\n        Args:\n            pred_maps (list[Tensor]): Raw predictions for a batch of images.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            cfg (mmcv.Config | None): Test / postprocessing configuration,\n                if None, test_cfg would be used. 
Default: None.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default: True.\n\n        Returns:\n            list[tuple[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is an (n, 5) tensor, where 5 represent\n                (tl_x, tl_y, br_x, br_y, score) and the score between 0 and 1.\n                The shape of the second tensor in the tuple is (n,), and\n                each element represents the class label of the corresponding\n                box.\n        \"\"\"\n        assert len(pred_maps) == self.num_levels\n        cfg = self.test_cfg if cfg is None else cfg\n        scale_factors = np.array(\n            [img_meta['scale_factor'] for img_meta in img_metas])\n\n        num_imgs = len(img_metas)\n        featmap_sizes = [pred_map.shape[-2:] for pred_map in pred_maps]\n\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=pred_maps[0].device)\n        flatten_preds = []\n        flatten_strides = []\n        for pred, stride in zip(pred_maps, self.featmap_strides):\n            pred = pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                    self.num_attrib)\n            pred[..., :2].sigmoid_()\n            flatten_preds.append(pred)\n            flatten_strides.append(\n                pred.new_tensor(stride).expand(pred.size(1)))\n\n        flatten_preds = torch.cat(flatten_preds, dim=1)\n        flatten_bbox_preds = flatten_preds[..., :4]\n        flatten_objectness = flatten_preds[..., 4].sigmoid()\n        flatten_cls_scores = flatten_preds[..., 5:].sigmoid()\n        flatten_anchors = torch.cat(mlvl_anchors)\n        flatten_strides = torch.cat(flatten_strides)\n        flatten_bboxes = self.bbox_coder.decode(flatten_anchors,\n                                                flatten_bbox_preds,\n                                                flatten_strides.unsqueeze(-1))\n\n        if with_nms and (flatten_objectness.size(0) == 0):\n            return torch.zeros((0, 5)), torch.zeros((0, ))\n\n        if rescale:\n            flatten_bboxes /= flatten_bboxes.new_tensor(\n                scale_factors).unsqueeze(1)\n\n        padding = flatten_bboxes.new_zeros(num_imgs, flatten_bboxes.shape[1],\n                                           1)\n        flatten_cls_scores = torch.cat([flatten_cls_scores, padding], dim=-1)\n\n        det_results = []\n        for (bboxes, scores, objectness) in zip(flatten_bboxes,\n                                                flatten_cls_scores,\n                                                flatten_objectness):\n            # Filtering out all predictions with conf < conf_thr\n            conf_thr = cfg.get('conf_thr', -1)\n            if conf_thr > 0:\n                conf_inds = objectness >= conf_thr\n                bboxes = bboxes[conf_inds, :]\n                scores = scores[conf_inds, :]\n                objectness = objectness[conf_inds]\n\n            det_bboxes, det_labels = multiclass_nms(\n                bboxes,\n                scores,\n                cfg.score_thr,\n                cfg.nms,\n                cfg.max_per_img,\n                score_factors=objectness)\n            det_results.append(tuple([det_bboxes, det_labels]))\n        return det_results\n\n    @force_fp32(apply_to=('pred_maps', ))\n    def loss(self,\n             pred_maps,\n          
   gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n\n        Args:\n            pred_maps (list[Tensor]): Prediction map for each scale level,\n                shape (N, num_anchors * num_attrib, H, W)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        num_imgs = len(img_metas)\n        device = pred_maps[0][0].device\n\n        featmap_sizes = [\n            pred_maps[i].shape[-2:] for i in range(self.num_levels)\n        ]\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        anchor_list = [mlvl_anchors for _ in range(num_imgs)]\n\n        responsible_flag_list = []\n        for img_id in range(len(img_metas)):\n            responsible_flag_list.append(\n                self.prior_generator.responsible_flags(featmap_sizes,\n                                                       gt_bboxes[img_id],\n                                                       device))\n\n        target_maps_list, neg_maps_list = self.get_targets(\n            anchor_list, responsible_flag_list, gt_bboxes, gt_labels)\n\n        losses_cls, losses_conf, losses_xy, losses_wh = multi_apply(\n            self.loss_single, pred_maps, target_maps_list, neg_maps_list)\n\n        return dict(\n            loss_cls=losses_cls,\n            loss_conf=losses_conf,\n            loss_xy=losses_xy,\n            loss_wh=losses_wh)\n\n    def loss_single(self, pred_map, target_map, neg_map):\n        \"\"\"Compute loss of a single image from a batch.\n\n        Args:\n            pred_map (Tensor): Raw predictions for a single level.\n            target_map (Tensor): The Ground-Truth target for a single level.\n            neg_map (Tensor): The negative masks for a single level.\n\n        Returns:\n            tuple:\n                loss_cls (Tensor): Classification loss.\n                loss_conf (Tensor): Confidence loss.\n                loss_xy (Tensor): Regression loss of x, y coordinate.\n                loss_wh (Tensor): Regression loss of w, h coordinate.\n        \"\"\"\n\n        num_imgs = len(pred_map)\n        pred_map = pred_map.permute(0, 2, 3,\n                                    1).reshape(num_imgs, -1, self.num_attrib)\n        neg_mask = neg_map.float()\n        pos_mask = target_map[..., 4]\n        pos_and_neg_mask = neg_mask + pos_mask\n        pos_mask = pos_mask.unsqueeze(dim=-1)\n        if torch.max(pos_and_neg_mask) > 1.:\n            warnings.warn('There is overlap between pos and neg sample.')\n            pos_and_neg_mask = pos_and_neg_mask.clamp(min=0., max=1.)\n\n        pred_xy = pred_map[..., :2]\n        pred_wh = pred_map[..., 2:4]\n        pred_conf = pred_map[..., 4]\n        pred_label = pred_map[..., 5:]\n\n        target_xy = target_map[..., :2]\n        target_wh = target_map[..., 2:4]\n        target_conf = target_map[..., 4]\n        target_label = target_map[..., 5:]\n\n        loss_cls = 
self.loss_cls(pred_label, target_label, weight=pos_mask)\n        loss_conf = self.loss_conf(\n            pred_conf, target_conf, weight=pos_and_neg_mask)\n        loss_xy = self.loss_xy(pred_xy, target_xy, weight=pos_mask)\n        loss_wh = self.loss_wh(pred_wh, target_wh, weight=pos_mask)\n\n        return loss_cls, loss_conf, loss_xy, loss_wh\n\n    def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,\n                    gt_labels_list):\n        \"\"\"Compute target maps for anchors in multiple images.\n\n        Args:\n            anchor_list (list[list[Tensor]]): Multi level anchors of each\n                image. The outer list indicates images, and the inner list\n                corresponds to feature levels of the image. Each element of\n                the inner list is a tensor of shape (num_total_anchors, 4).\n            responsible_flag_list (list[list[Tensor]]): Multi level responsible\n                flags of each image. Each element is a tensor of shape\n                (num_total_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n                - target_map_list (list[Tensor]): Target map of each level.\n                - neg_map_list (list[Tensor]): Negative map of each level.\n        \"\"\"\n        num_imgs = len(anchor_list)\n\n        # anchor number of multi levels\n        num_level_anchors = [anchors.size(0) for anchors in anchor_list[0]]\n\n        results = multi_apply(self._get_targets_single, anchor_list,\n                              responsible_flag_list, gt_bboxes_list,\n                              gt_labels_list)\n\n        all_target_maps, all_neg_maps = results\n        assert num_imgs == len(all_target_maps) == len(all_neg_maps)\n        target_maps_list = images_to_levels(all_target_maps, num_level_anchors)\n        neg_maps_list = images_to_levels(all_neg_maps, num_level_anchors)\n\n        return target_maps_list, neg_maps_list\n\n    def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,\n                            gt_labels):\n        \"\"\"Generate matching bounding box prior and converted GT.\n\n        Args:\n            anchors (list[Tensor]): Multi-level anchors of the image.\n            responsible_flags (list[Tensor]): Multi-level responsible flags of\n                anchors\n            gt_bboxes (Tensor): Ground truth bboxes of single image.\n            gt_labels (Tensor): Ground truth labels of single image.\n\n        Returns:\n            tuple:\n                target_map (Tensor): Predication target map of each\n                    scale level, shape (num_total_anchors,\n                    5+num_classes)\n                neg_map (Tensor): Negative map of each scale level,\n                    shape (num_total_anchors,)\n        \"\"\"\n\n        anchor_strides = []\n        for i in range(len(anchors)):\n            anchor_strides.append(\n                torch.tensor(self.featmap_strides[i],\n                             device=gt_bboxes.device).repeat(len(anchors[i])))\n        concat_anchors = torch.cat(anchors)\n        concat_responsible_flags = torch.cat(responsible_flags)\n\n        anchor_strides = torch.cat(anchor_strides)\n        assert len(anchor_strides) == len(concat_anchors) == \\\n               len(concat_responsible_flags)\n        assign_result = 
self.assigner.assign(concat_anchors,\n                                             concat_responsible_flags,\n                                             gt_bboxes)\n        sampling_result = self.sampler.sample(assign_result, concat_anchors,\n                                              gt_bboxes)\n\n        target_map = concat_anchors.new_zeros(\n            concat_anchors.size(0), self.num_attrib)\n\n        target_map[sampling_result.pos_inds, :4] = self.bbox_coder.encode(\n            sampling_result.pos_bboxes, sampling_result.pos_gt_bboxes,\n            anchor_strides[sampling_result.pos_inds])\n\n        target_map[sampling_result.pos_inds, 4] = 1\n\n        gt_labels_one_hot = F.one_hot(\n            gt_labels, num_classes=self.num_classes).float()\n        if self.one_hot_smoother != 0:  # label smooth\n            gt_labels_one_hot = gt_labels_one_hot * (\n                1 - self.one_hot_smoother\n            ) + self.one_hot_smoother / self.num_classes\n        target_map[sampling_result.pos_inds, 5:] = gt_labels_one_hot[\n            sampling_result.pos_assigned_gt_inds]\n\n        neg_map = concat_anchors.new_zeros(\n            concat_anchors.size(0), dtype=torch.uint8)\n        neg_map[sampling_result.neg_inds] = 1\n\n        return target_map, neg_map\n\n    def aug_test(self, feats, img_metas, rescale=False):\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            feats (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains features for all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. 
each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[ndarray]: bbox results of each class\n        \"\"\"\n        return self.aug_test_bboxes(feats, img_metas, rescale=rescale)\n\n    @force_fp32(apply_to=('pred_maps'))\n    def onnx_export(self, pred_maps, img_metas, with_nms=True):\n        num_levels = len(pred_maps)\n        pred_maps_list = [pred_maps[i].detach() for i in range(num_levels)]\n\n        cfg = self.test_cfg\n        assert len(pred_maps_list) == self.num_levels\n\n        device = pred_maps_list[0].device\n        batch_size = pred_maps_list[0].shape[0]\n\n        featmap_sizes = [\n            pred_maps_list[i].shape[-2:] for i in range(self.num_levels)\n        ]\n        mlvl_anchors = self.prior_generator.grid_priors(\n            featmap_sizes, device=device)\n        # convert to tensor to keep tracing\n        nms_pre_tensor = torch.tensor(\n            cfg.get('nms_pre', -1), device=device, dtype=torch.long)\n\n        multi_lvl_bboxes = []\n        multi_lvl_cls_scores = []\n        multi_lvl_conf_scores = []\n        for i in range(self.num_levels):\n            # get some key info for current scale\n            pred_map = pred_maps_list[i]\n            stride = self.featmap_strides[i]\n            # (b,h, w, num_anchors*num_attrib) ->\n            # (b,h*w*num_anchors, num_attrib)\n            pred_map = pred_map.permute(0, 2, 3,\n                                        1).reshape(batch_size, -1,\n                                                   self.num_attrib)\n            # Inplace operation like\n            # ```pred_map[..., :2] = \\torch.sigmoid(pred_map[..., :2])```\n            # would create constant tensor when exporting to onnx\n            pred_map_conf = torch.sigmoid(pred_map[..., :2])\n            pred_map_rest = pred_map[..., 2:]\n            pred_map = torch.cat([pred_map_conf, pred_map_rest], dim=-1)\n            pred_map_boxes = pred_map[..., :4]\n            multi_lvl_anchor = mlvl_anchors[i]\n            multi_lvl_anchor = multi_lvl_anchor.expand_as(pred_map_boxes)\n            bbox_pred = self.bbox_coder.decode(multi_lvl_anchor,\n                                               pred_map_boxes, stride)\n            # conf and cls\n            conf_pred = torch.sigmoid(pred_map[..., 4])\n            cls_pred = torch.sigmoid(pred_map[..., 5:]).view(\n                batch_size, -1, self.num_classes)  # Cls pred one-hot.\n\n            # Get top-k prediction\n            from mmdet.core.export import get_k_for_topk\n            nms_pre = get_k_for_topk(nms_pre_tensor, bbox_pred.shape[1])\n            if nms_pre > 0:\n                _, topk_inds = conf_pred.topk(nms_pre)\n                batch_inds = torch.arange(batch_size).view(\n                    -1, 1).expand_as(topk_inds).long()\n                # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501\n                transformed_inds = (\n                    bbox_pred.shape[1] * batch_inds + topk_inds)\n                bbox_pred = bbox_pred.reshape(-1,\n                                              4)[transformed_inds, :].reshape(\n                                                  batch_size, -1, 4)\n                cls_pred = cls_pred.reshape(\n                    -1, self.num_classes)[transformed_inds, :].reshape(\n                        batch_size, -1, self.num_classes)\n                conf_pred = conf_pred.reshape(-1, 
1)[transformed_inds].reshape(\n                    batch_size, -1)\n\n            # Save the result of current scale\n            multi_lvl_bboxes.append(bbox_pred)\n            multi_lvl_cls_scores.append(cls_pred)\n            multi_lvl_conf_scores.append(conf_pred)\n\n        # Merge the results of different scales together\n        batch_mlvl_bboxes = torch.cat(multi_lvl_bboxes, dim=1)\n        batch_mlvl_scores = torch.cat(multi_lvl_cls_scores, dim=1)\n        batch_mlvl_conf_scores = torch.cat(multi_lvl_conf_scores, dim=1)\n\n        # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment\n        from mmdet.core.export import add_dummy_nms_for_onnx\n        conf_thr = cfg.get('conf_thr', -1)\n        score_thr = cfg.get('score_thr', -1)\n        # follow original pipeline of YOLOv3\n        if conf_thr > 0:\n            mask = (batch_mlvl_conf_scores >= conf_thr).float()\n            batch_mlvl_conf_scores *= mask\n        if score_thr > 0:\n            mask = (batch_mlvl_scores > score_thr).float()\n            batch_mlvl_scores *= mask\n        batch_mlvl_conf_scores = batch_mlvl_conf_scores.unsqueeze(2).expand_as(\n            batch_mlvl_scores)\n        batch_mlvl_scores = batch_mlvl_scores * batch_mlvl_conf_scores\n        if with_nms:\n            max_output_boxes_per_class = cfg.nms.get(\n                'max_output_boxes_per_class', 200)\n            iou_threshold = cfg.nms.get('iou_threshold', 0.5)\n            # keep aligned with original pipeline, improve\n            # mAP by 1% for YOLOv3 in ONNX\n            score_threshold = 0\n            nms_pre = cfg.get('deploy_nms_pre', -1)\n            return add_dummy_nms_for_onnx(\n                batch_mlvl_bboxes,\n                batch_mlvl_scores,\n                max_output_boxes_per_class,\n                iou_threshold,\n                score_threshold,\n                nms_pre,\n                cfg.max_per_img,\n            )\n        else:\n            return batch_mlvl_bboxes, batch_mlvl_scores\n"
  },
  {
    "path": "mmdet/models/dense_heads/yolof_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import (ConvModule, bias_init_with_prob, constant_init, is_norm,\n                      normal_init)\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import anchor_inside_flags, multi_apply, reduce_mean, unmap\nfrom ..builder import HEADS\nfrom .anchor_head import AnchorHead\n\nINF = 1e8\n\n\ndef levels_to_images(mlvl_tensor):\n    \"\"\"Concat multi-level feature maps by image.\n\n    [feature_level0, feature_level1...] -> [feature_image0, feature_image1...]\n    Convert the shape of each element in mlvl_tensor from (N, C, H, W) to\n    (N, H*W , C), then split the element to N elements with shape (H*W, C), and\n    concat elements in same image of all level along first dimension.\n\n    Args:\n        mlvl_tensor (list[torch.Tensor]): list of Tensor which collect from\n            corresponding level. Each element is of shape (N, C, H, W)\n\n    Returns:\n        list[torch.Tensor]: A list that contains N tensors and each tensor is\n            of shape (num_elements, C)\n    \"\"\"\n    batch_size = mlvl_tensor[0].size(0)\n    batch_list = [[] for _ in range(batch_size)]\n    channels = mlvl_tensor[0].size(1)\n    for t in mlvl_tensor:\n        t = t.permute(0, 2, 3, 1)\n        t = t.view(batch_size, -1, channels).contiguous()\n        for img in range(batch_size):\n            batch_list[img].append(t[img])\n    return [torch.cat(item, 0) for item in batch_list]\n\n\n@HEADS.register_module()\nclass YOLOFHead(AnchorHead):\n    \"\"\"YOLOFHead Paper link: https://arxiv.org/abs/2103.09460.\n\n    Args:\n        num_classes (int): The number of object classes (w/o background)\n        in_channels (List[int]): The number of input channels per scale.\n        cls_num_convs (int): The number of convolutions of cls branch.\n           Default 2.\n        reg_num_convs (int): The number of convolutions of reg branch.\n           Default 4.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 num_cls_convs=2,\n                 num_reg_convs=4,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 **kwargs):\n        self.num_cls_convs = num_cls_convs\n        self.num_reg_convs = num_reg_convs\n        self.norm_cfg = norm_cfg\n        super(YOLOFHead, self).__init__(num_classes, in_channels, **kwargs)\n\n    def _init_layers(self):\n        cls_subnet = []\n        bbox_subnet = []\n        for i in range(self.num_cls_convs):\n            cls_subnet.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    kernel_size=3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        for i in range(self.num_reg_convs):\n            bbox_subnet.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    kernel_size=3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg))\n        self.cls_subnet = nn.Sequential(*cls_subnet)\n        self.bbox_subnet = nn.Sequential(*bbox_subnet)\n        self.cls_score = nn.Conv2d(\n            self.in_channels,\n            self.num_base_priors * self.num_classes,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n        self.bbox_pred = nn.Conv2d(\n      
      self.in_channels,\n            self.num_base_priors * 4,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n        self.object_pred = nn.Conv2d(\n            self.in_channels,\n            self.num_base_priors,\n            kernel_size=3,\n            stride=1,\n            padding=1)\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n        # Use prior in model initialization to improve stability\n        bias_cls = bias_init_with_prob(0.01)\n        torch.nn.init.constant_(self.cls_score.bias, bias_cls)\n\n    def forward_single(self, feature):\n        cls_score = self.cls_score(self.cls_subnet(feature))\n        N, _, H, W = cls_score.shape\n        cls_score = cls_score.view(N, -1, self.num_classes, H, W)\n\n        reg_feat = self.bbox_subnet(feature)\n        bbox_reg = self.bbox_pred(reg_feat)\n        objectness = self.object_pred(reg_feat)\n\n        # implicit objectness\n        objectness = objectness.view(N, -1, 1, H, W)\n        normalized_cls_score = cls_score + objectness - torch.log(\n            1. + torch.clamp(cls_score.exp(), max=INF) +\n            torch.clamp(objectness.exp(), max=INF))\n        normalized_cls_score = normalized_cls_score.view(N, -1, H, W)\n        return normalized_cls_score, bbox_reg\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute losses of the head.\n\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level\n                Has shape (batch, num_anchors * num_classes, h, w)\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level with shape (batch, num_anchors * 4, h, w)\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss. 
Default: None\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        assert len(cls_scores) == 1\n        assert self.prior_generator.num_levels == 1\n\n        device = cls_scores[0].device\n        featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\n        anchor_list, valid_flag_list = self.get_anchors(\n            featmap_sizes, img_metas, device=device)\n\n        # The output level is always 1\n        anchor_list = [anchors[0] for anchors in anchor_list]\n        valid_flag_list = [valid_flags[0] for valid_flags in valid_flag_list]\n\n        cls_scores_list = levels_to_images(cls_scores)\n        bbox_preds_list = levels_to_images(bbox_preds)\n\n        label_channels = self.cls_out_channels if self.use_sigmoid_cls else 1\n        cls_reg_targets = self.get_targets(\n            cls_scores_list,\n            bbox_preds_list,\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes,\n            img_metas,\n            gt_bboxes_ignore_list=gt_bboxes_ignore,\n            gt_labels_list=gt_labels,\n            label_channels=label_channels)\n        if cls_reg_targets is None:\n            return None\n        (batch_labels, batch_label_weights, num_total_pos, num_total_neg,\n         batch_bbox_weights, batch_pos_predicted_boxes,\n         batch_target_boxes) = cls_reg_targets\n\n        flatten_labels = batch_labels.reshape(-1)\n        batch_label_weights = batch_label_weights.reshape(-1)\n        cls_score = cls_scores[0].permute(0, 2, 3,\n                                          1).reshape(-1, self.cls_out_channels)\n\n        num_total_samples = (num_total_pos +\n                             num_total_neg) if self.sampling else num_total_pos\n        num_total_samples = reduce_mean(\n            cls_score.new_tensor(num_total_samples)).clamp_(1.0).item()\n\n        # classification loss\n        loss_cls = self.loss_cls(\n            cls_score,\n            flatten_labels,\n            batch_label_weights,\n            avg_factor=num_total_samples)\n\n        # regression loss\n        if batch_pos_predicted_boxes.shape[0] == 0:\n            # no pos sample\n            loss_bbox = batch_pos_predicted_boxes.sum() * 0\n        else:\n            loss_bbox = self.loss_bbox(\n                batch_pos_predicted_boxes,\n                batch_target_boxes,\n                batch_bbox_weights.float(),\n                avg_factor=num_total_samples)\n\n        return dict(loss_cls=loss_cls, loss_bbox=loss_bbox)\n\n    def get_targets(self,\n                    cls_scores_list,\n                    bbox_preds_list,\n                    anchor_list,\n                    valid_flag_list,\n                    gt_bboxes_list,\n                    img_metas,\n                    gt_bboxes_ignore_list=None,\n                    gt_labels_list=None,\n                    label_channels=1,\n                    unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in\n        multiple images.\n\n        Args:\n            cls_scores_list (list[Tensor]): Classification scores of\n                each image. Each is a 2D-tensor, the shape is\n                (h * w, num_anchors * num_classes).\n            bbox_preds_list (list[Tensor]): Bbox preds of each image.\n                Each is a 2D-tensor, the shape is (h * w, num_anchors * 4).\n            anchor_list (list[Tensor]): Anchors of each image. 
Each element\n                is a tensor of shape (h * w * num_anchors, 4).\n            valid_flag_list (list[Tensor]): Valid flags of each image. Each\n               element is a tensor of shape (h * w * num_anchors, )\n            gt_bboxes_list (list[Tensor]): Ground truth bboxes of each image.\n            img_metas (list[dict]): Meta info of each image.\n            gt_bboxes_ignore_list (list[Tensor]): Ground truth bboxes to be\n                ignored.\n            gt_labels_list (list[Tensor]): Ground truth labels of each box.\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple: Usually returns a tuple containing learning targets.\n\n                - batch_labels (Tensor): Label of all images, which \\\n                    is a tensor of shape (batch, h * w * num_anchors)\n                - batch_label_weights (Tensor): Label weights of all images, \\\n                    which is a tensor of shape (batch, h * w * num_anchors)\n                - num_total_pos (int): Number of positive samples in all \\\n                    images.\n                - num_total_neg (int): Number of negative samples in all \\\n                    images.\n            additional_returns: This function enables user-defined returns from\n                `self._get_targets_single`. These returns are currently refined\n                to properties at each feature map (i.e. having HxW dimension).\n                The results will be concatenated after the end of this\n                function.\n        \"\"\"\n        num_imgs = len(img_metas)\n        assert len(anchor_list) == len(valid_flag_list) == num_imgs\n\n        # compute targets for each image\n        if gt_bboxes_ignore_list is None:\n            gt_bboxes_ignore_list = [None for _ in range(num_imgs)]\n        if gt_labels_list is None:\n            gt_labels_list = [None for _ in range(num_imgs)]\n        results = multi_apply(\n            self._get_targets_single,\n            bbox_preds_list,\n            anchor_list,\n            valid_flag_list,\n            gt_bboxes_list,\n            gt_bboxes_ignore_list,\n            gt_labels_list,\n            img_metas,\n            label_channels=label_channels,\n            unmap_outputs=unmap_outputs)\n        (all_labels, all_label_weights, pos_inds_list, neg_inds_list,\n         sampling_results_list) = results[:5]\n        rest_results = list(results[5:])  # user-added return values\n        # no valid anchors\n        if any([labels is None for labels in all_labels]):\n            return None\n        # sampled anchors of all images\n        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\n        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\n\n        batch_labels = torch.stack(all_labels, 0)\n        batch_label_weights = torch.stack(all_label_weights, 0)\n\n        res = (batch_labels, batch_label_weights, num_total_pos, num_total_neg)\n        for i, rests in enumerate(rest_results):  # user-added return values\n            rest_results[i] = torch.cat(rests, 0)\n\n        return res + tuple(rest_results)\n\n    def _get_targets_single(self,\n                            bbox_preds,\n                            flat_anchors,\n                            valid_flags,\n                            gt_bboxes,\n                            gt_bboxes_ignore,\n                            gt_labels,\n                    
        img_meta,\n                            label_channels=1,\n                            unmap_outputs=True):\n        \"\"\"Compute regression and classification targets for anchors in a\n        single image.\n\n        Args:\n            bbox_preds (Tensor): Bbox prediction of the image, which\n                shape is (h * w ,4)\n            flat_anchors (Tensor): Anchors of the image, which shape is\n                (h * w * num_anchors ,4)\n            valid_flags (Tensor): Valid flags of the image, which shape is\n                (h * w * num_anchors,).\n            gt_bboxes (Tensor): Ground truth bboxes of the image,\n                shape (num_gts, 4).\n            gt_bboxes_ignore (Tensor): Ground truth bboxes to be\n                ignored, shape (num_ignored_gts, 4).\n            img_meta (dict): Meta info of the image.\n            gt_labels (Tensor): Ground truth labels of each box,\n                shape (num_gts,).\n            label_channels (int): Channel of label.\n            unmap_outputs (bool): Whether to map outputs back to the original\n                set of anchors.\n\n        Returns:\n            tuple:\n                labels (Tensor): Labels of image, which shape is\n                    (h * w * num_anchors, ).\n                label_weights (Tensor): Label weights of image, which shape is\n                    (h * w * num_anchors, ).\n                pos_inds (Tensor): Pos index of image.\n                neg_inds (Tensor): Neg index of image.\n                sampling_result (obj:`SamplingResult`): Sampling result.\n                pos_bbox_weights (Tensor): The Weight of using to calculate\n                    the bbox branch loss, which shape is (num, ).\n                pos_predicted_boxes (Tensor): boxes predicted value of\n                    using to calculate the bbox branch loss, which shape is\n                    (num, 4).\n                pos_target_boxes (Tensor): boxes target value of\n                    using to calculate the bbox branch loss, which shape is\n                    (num, 4).\n        \"\"\"\n        inside_flags = anchor_inside_flags(flat_anchors, valid_flags,\n                                           img_meta['img_shape'][:2],\n                                           self.train_cfg.allowed_border)\n        if not inside_flags.any():\n            return (None, ) * 8\n        # assign gt and sample anchors\n        anchors = flat_anchors[inside_flags, :]\n        bbox_preds = bbox_preds.reshape(-1, 4)\n        bbox_preds = bbox_preds[inside_flags, :]\n\n        # decoded bbox\n        decoder_bbox_preds = self.bbox_coder.decode(anchors, bbox_preds)\n        assign_result = self.assigner.assign(\n            decoder_bbox_preds, anchors, gt_bboxes, gt_bboxes_ignore,\n            None if self.sampling else gt_labels)\n\n        pos_bbox_weights = assign_result.get_extra_property('pos_idx')\n        pos_predicted_boxes = assign_result.get_extra_property(\n            'pos_predicted_boxes')\n        pos_target_boxes = assign_result.get_extra_property('target_boxes')\n\n        sampling_result = self.sampler.sample(assign_result, anchors,\n                                              gt_bboxes)\n        num_valid_anchors = anchors.shape[0]\n        labels = anchors.new_full((num_valid_anchors, ),\n                                  self.num_classes,\n                                  dtype=torch.long)\n        label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)\n\n        pos_inds = 
sampling_result.pos_inds\n        neg_inds = sampling_result.neg_inds\n        if len(pos_inds) > 0:\n            if gt_labels is None:\n                # Only rpn gives gt_labels as None\n                # Foreground is the first class since v2.5.0\n                labels[pos_inds] = 0\n            else:\n                labels[pos_inds] = gt_labels[\n                    sampling_result.pos_assigned_gt_inds]\n            if self.train_cfg.pos_weight <= 0:\n                label_weights[pos_inds] = 1.0\n            else:\n                label_weights[pos_inds] = self.train_cfg.pos_weight\n        if len(neg_inds) > 0:\n            label_weights[neg_inds] = 1.0\n\n        # map up to original set of anchors\n        if unmap_outputs:\n            num_total_anchors = flat_anchors.size(0)\n            labels = unmap(\n                labels, num_total_anchors, inside_flags,\n                fill=self.num_classes)  # fill bg label\n            label_weights = unmap(label_weights, num_total_anchors,\n                                  inside_flags)\n\n        return (labels, label_weights, pos_inds, neg_inds, sampling_result,\n                pos_bbox_weights, pos_predicted_boxes, pos_target_boxes)\n"
  },
  {
    "path": "mmdet/models/dense_heads/yolox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (ConvModule, DepthwiseSeparableConvModule,\n                      bias_init_with_prob)\nfrom mmcv.ops.nms import batched_nms\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import (MlvlPointGenerator, bbox_xyxy_to_cxcywh,\n                        build_assigner, build_sampler, multi_apply,\n                        reduce_mean)\nfrom ..builder import HEADS, build_loss\nfrom .base_dense_head import BaseDenseHead\nfrom .dense_test_mixins import BBoxTestMixin\n\n\n@HEADS.register_module()\nclass YOLOXHead(BaseDenseHead, BBoxTestMixin):\n    \"\"\"YOLOXHead head used in `YOLOX <https://arxiv.org/abs/2107.08430>`_.\n\n    Args:\n        num_classes (int): Number of categories excluding the background\n            category.\n        in_channels (int): Number of channels in the input feature map.\n        feat_channels (int): Number of hidden channels in stacking convs.\n            Default: 256\n        stacked_convs (int): Number of stacking convs of the head.\n            Default: 2.\n        strides (tuple): Downsample factor of each feature map.\n        use_depthwise (bool): Whether to depthwise separable convolution in\n            blocks. Default: False\n        dcn_on_last_conv (bool): If true, use dcn in the last layer of\n            towers. Default: False.\n        conv_bias (bool | str): If specified as `auto`, it will be decided by\n            the norm_cfg. Bias of conv will be set as True if `norm_cfg` is\n            None, otherwise False. Default: \"auto\".\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        act_cfg (dict): Config dict for activation layer. 
Default: None.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox (dict): Config of localization loss.\n        loss_obj (dict): Config of objectness loss.\n        loss_l1 (dict): Config of L1 loss.\n        train_cfg (dict): Training config of anchor head.\n        test_cfg (dict): Testing config of anchor head.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 in_channels,\n                 feat_channels=256,\n                 stacked_convs=2,\n                 strides=[8, 16, 32],\n                 use_depthwise=False,\n                 dcn_on_last_conv=False,\n                 conv_bias='auto',\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     reduction='sum',\n                     loss_weight=1.0),\n                 loss_bbox=dict(\n                     type='IoULoss',\n                     mode='square',\n                     eps=1e-16,\n                     reduction='sum',\n                     loss_weight=5.0),\n                 loss_obj=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     reduction='sum',\n                     loss_weight=1.0),\n                 loss_l1=dict(type='L1Loss', reduction='sum', loss_weight=1.0),\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=dict(\n                     type='Kaiming',\n                     layer='Conv2d',\n                     a=math.sqrt(5),\n                     distribution='uniform',\n                     mode='fan_in',\n                     nonlinearity='leaky_relu')):\n\n        super().__init__(init_cfg=init_cfg)\n        self.num_classes = num_classes\n        self.cls_out_channels = num_classes\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.stacked_convs = stacked_convs\n        self.strides = strides\n        self.use_depthwise = use_depthwise\n        self.dcn_on_last_conv = dcn_on_last_conv\n        assert conv_bias == 'auto' or isinstance(conv_bias, bool)\n        self.conv_bias = conv_bias\n        self.use_sigmoid_cls = True\n\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n        self.loss_obj = build_loss(loss_obj)\n\n        self.use_l1 = False  # This flag will be modified by hooks.\n        self.loss_l1 = build_loss(loss_l1)\n\n        self.prior_generator = MlvlPointGenerator(strides, offset=0)\n\n        self.test_cfg = test_cfg\n        self.train_cfg = train_cfg\n\n        self.sampling = False\n        if self.train_cfg:\n            self.assigner = build_assigner(self.train_cfg.assigner)\n            # sampling=False so use PseudoSampler\n            sampler_cfg = dict(type='PseudoSampler')\n            self.sampler = build_sampler(sampler_cfg, context=self)\n\n        self.fp16_enabled = False\n        self._init_layers()\n\n    def _init_layers(self):\n        self.multi_level_cls_convs = nn.ModuleList()\n        self.multi_level_reg_convs = nn.ModuleList()\n        self.multi_level_conv_cls = nn.ModuleList()\n        
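# Each stride gets its own stacked convs and prediction convs (cls / reg /\n        # obj); they are appended level by level in the loop below.\n        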
self.multi_level_conv_reg = nn.ModuleList()\n        self.multi_level_conv_obj = nn.ModuleList()\n        for _ in self.strides:\n            self.multi_level_cls_convs.append(self._build_stacked_convs())\n            self.multi_level_reg_convs.append(self._build_stacked_convs())\n            conv_cls, conv_reg, conv_obj = self._build_predictor()\n            self.multi_level_conv_cls.append(conv_cls)\n            self.multi_level_conv_reg.append(conv_reg)\n            self.multi_level_conv_obj.append(conv_obj)\n\n    def _build_stacked_convs(self):\n        \"\"\"Initialize conv layers of a single level head.\"\"\"\n        conv = DepthwiseSeparableConvModule \\\n            if self.use_depthwise else ConvModule\n        stacked_convs = []\n        for i in range(self.stacked_convs):\n            chn = self.in_channels if i == 0 else self.feat_channels\n            if self.dcn_on_last_conv and i == self.stacked_convs - 1:\n                conv_cfg = dict(type='DCNv2')\n            else:\n                conv_cfg = self.conv_cfg\n            stacked_convs.append(\n                conv(\n                    chn,\n                    self.feat_channels,\n                    3,\n                    stride=1,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    act_cfg=self.act_cfg,\n                    bias=self.conv_bias))\n        return nn.Sequential(*stacked_convs)\n\n    def _build_predictor(self):\n        \"\"\"Initialize predictor layers of a single level head.\"\"\"\n        conv_cls = nn.Conv2d(self.feat_channels, self.cls_out_channels, 1)\n        conv_reg = nn.Conv2d(self.feat_channels, 4, 1)\n        conv_obj = nn.Conv2d(self.feat_channels, 1, 1)\n        return conv_cls, conv_reg, conv_obj\n\n    def init_weights(self):\n        super(YOLOXHead, self).init_weights()\n        # Use prior in model initialization to improve stability\n        bias_init = bias_init_with_prob(0.01)\n        for conv_cls, conv_obj in zip(self.multi_level_conv_cls,\n                                      self.multi_level_conv_obj):\n            conv_cls.bias.data.fill_(bias_init)\n            conv_obj.bias.data.fill_(bias_init)\n\n    def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg,\n                       conv_obj):\n        \"\"\"Forward feature of a single scale level.\"\"\"\n\n        cls_feat = cls_convs(x)\n        reg_feat = reg_convs(x)\n\n        cls_score = conv_cls(cls_feat)\n        bbox_pred = conv_reg(reg_feat)\n        objectness = conv_obj(reg_feat)\n\n        return cls_score, bbox_pred, objectness\n\n    def forward(self, feats):\n        \"\"\"Forward features from the upstream network.\n\n        Args:\n            feats (tuple[Tensor]): Features from the upstream network, each is\n                a 4D-tensor.\n        Returns:\n            tuple[Tensor]: A tuple of multi-level predication map, each is a\n                4D-tensor of shape (batch_size, 5+num_classes, height, width).\n        \"\"\"\n\n        return multi_apply(self.forward_single, feats,\n                           self.multi_level_cls_convs,\n                           self.multi_level_reg_convs,\n                           self.multi_level_conv_cls,\n                           self.multi_level_conv_reg,\n                           self.multi_level_conv_obj)\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses'))\n    def get_bboxes(self,\n                   cls_scores,\n                   
bbox_preds,\n                   objectnesses,\n                   img_metas=None,\n                   cfg=None,\n                   rescale=False,\n                   with_nms=True):\n        \"\"\"Transform network outputs of a batch into bbox results.\n        Args:\n            cls_scores (list[Tensor]): Classification scores for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * num_classes, H, W).\n            bbox_preds (list[Tensor]): Box energies / deltas for all\n                scale levels, each is a 4D-tensor, has shape\n                (batch_size, num_priors * 4, H, W).\n            objectnesses (list[Tensor], Optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, 1, H, W).\n            img_metas (list[dict], Optional): Image meta info. Default None.\n            cfg (mmcv.Config, Optional): Test / postprocessing configuration,\n                if None, test_cfg would be used.  Default None.\n            rescale (bool): If True, return boxes in original image space.\n                Default False.\n            with_nms (bool): If True, do nms before return boxes.\n                Default True.\n        Returns:\n            list[list[Tensor, Tensor]]: Each item in result_list is 2-tuple.\n                The first item is an (n, 5) tensor, where the first 4 columns\n                are bounding box positions (tl_x, tl_y, br_x, br_y) and the\n                5-th column is a score between 0 and 1. The second item is a\n                (n,) tensor where each item is the predicted class label of\n                the corresponding box.\n        \"\"\"\n        assert len(cls_scores) == len(bbox_preds) == len(objectnesses)\n        cfg = self.test_cfg if cfg is None else cfg\n        scale_factors = np.array(\n            [img_meta['scale_factor'] for img_meta in img_metas])\n\n        num_imgs = len(img_metas)\n        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device,\n            with_stride=True)\n\n        # flatten cls_scores, bbox_preds and objectness\n        flatten_cls_scores = [\n            cls_score.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                  self.cls_out_channels)\n            for cls_score in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_objectness = [\n            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)\n            for objectness in objectnesses\n        ]\n\n        flatten_cls_scores = torch.cat(flatten_cls_scores, dim=1).sigmoid()\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)\n        flatten_objectness = torch.cat(flatten_objectness, dim=1).sigmoid()\n        flatten_priors = torch.cat(mlvl_priors)\n\n        flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)\n\n        if rescale:\n            flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor(\n                scale_factors).unsqueeze(1)\n\n        result_list = []\n        for img_id in range(len(img_metas)):\n            cls_scores = flatten_cls_scores[img_id]\n            score_factor = flatten_objectness[img_id]\n            bboxes = 
flatten_bboxes[img_id]\n\n            result_list.append(\n                self._bboxes_nms(cls_scores, bboxes, score_factor, cfg))\n\n        return result_list\n\n    def _bbox_decode(self, priors, bbox_preds):\n        xys = (bbox_preds[..., :2] * priors[:, 2:]) + priors[:, :2]\n        whs = bbox_preds[..., 2:].exp() * priors[:, 2:]\n\n        tl_x = (xys[..., 0] - whs[..., 0] / 2)\n        tl_y = (xys[..., 1] - whs[..., 1] / 2)\n        br_x = (xys[..., 0] + whs[..., 0] / 2)\n        br_y = (xys[..., 1] + whs[..., 1] / 2)\n\n        decoded_bboxes = torch.stack([tl_x, tl_y, br_x, br_y], -1)\n        return decoded_bboxes\n\n    def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg):\n        max_scores, labels = torch.max(cls_scores, 1)\n        valid_mask = score_factor * max_scores >= cfg.score_thr\n\n        bboxes = bboxes[valid_mask]\n        scores = max_scores[valid_mask] * score_factor[valid_mask]\n        labels = labels[valid_mask]\n\n        if labels.numel() == 0:\n            return bboxes, labels\n        else:\n            dets, keep = batched_nms(bboxes, scores, labels, cfg.nms)\n            return dets, labels[keep]\n\n    @force_fp32(apply_to=('cls_scores', 'bbox_preds', 'objectnesses'))\n    def loss(self,\n             cls_scores,\n             bbox_preds,\n             objectnesses,\n             gt_bboxes,\n             gt_labels,\n             img_metas,\n             gt_bboxes_ignore=None):\n        \"\"\"Compute loss of the head.\n        Args:\n            cls_scores (list[Tensor]): Box scores for each scale level,\n                each is a 4D-tensor, the channel number is\n                num_priors * num_classes.\n            bbox_preds (list[Tensor]): Box energies / deltas for each scale\n                level, each is a 4D-tensor, the channel number is\n                num_priors * 4.\n            objectnesses (list[Tensor], Optional): Score factor for\n                all scale level, each is a 4D-tensor, has shape\n                (batch_size, 1, H, W).\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n        \"\"\"\n        num_imgs = len(img_metas)\n        featmap_sizes = [cls_score.shape[2:] for cls_score in cls_scores]\n        mlvl_priors = self.prior_generator.grid_priors(\n            featmap_sizes,\n            dtype=cls_scores[0].dtype,\n            device=cls_scores[0].device,\n            with_stride=True)\n\n        flatten_cls_preds = [\n            cls_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1,\n                                                 self.cls_out_channels)\n            for cls_pred in cls_scores\n        ]\n        flatten_bbox_preds = [\n            bbox_pred.permute(0, 2, 3, 1).reshape(num_imgs, -1, 4)\n            for bbox_pred in bbox_preds\n        ]\n        flatten_objectness = [\n            objectness.permute(0, 2, 3, 1).reshape(num_imgs, -1)\n            for objectness in objectnesses\n        ]\n\n        flatten_cls_preds = torch.cat(flatten_cls_preds, dim=1)\n        flatten_bbox_preds = torch.cat(flatten_bbox_preds, dim=1)\n        flatten_objectness = 
torch.cat(flatten_objectness, dim=1)\n        flatten_priors = torch.cat(mlvl_priors)\n        flatten_bboxes = self._bbox_decode(flatten_priors, flatten_bbox_preds)\n\n        (pos_masks, cls_targets, obj_targets, bbox_targets, l1_targets,\n         num_fg_imgs) = multi_apply(\n             self._get_target_single, flatten_cls_preds.detach(),\n             flatten_objectness.detach(),\n             flatten_priors.unsqueeze(0).repeat(num_imgs, 1, 1),\n             flatten_bboxes.detach(), gt_bboxes, gt_labels)\n\n        # The experimental results show that ‘reduce_mean’ can improve\n        # performance on the COCO dataset.\n        num_pos = torch.tensor(\n            sum(num_fg_imgs),\n            dtype=torch.float,\n            device=flatten_cls_preds.device)\n        num_total_samples = max(reduce_mean(num_pos), 1.0)\n\n        pos_masks = torch.cat(pos_masks, 0)\n        cls_targets = torch.cat(cls_targets, 0)\n        obj_targets = torch.cat(obj_targets, 0)\n        bbox_targets = torch.cat(bbox_targets, 0)\n        if self.use_l1:\n            l1_targets = torch.cat(l1_targets, 0)\n\n        loss_bbox = self.loss_bbox(\n            flatten_bboxes.view(-1, 4)[pos_masks],\n            bbox_targets) / num_total_samples\n        loss_obj = self.loss_obj(flatten_objectness.view(-1, 1),\n                                 obj_targets) / num_total_samples\n        loss_cls = self.loss_cls(\n            flatten_cls_preds.view(-1, self.num_classes)[pos_masks],\n            cls_targets) / num_total_samples\n\n        loss_dict = dict(\n            loss_cls=loss_cls, loss_bbox=loss_bbox, loss_obj=loss_obj)\n\n        if self.use_l1:\n            loss_l1 = self.loss_l1(\n                flatten_bbox_preds.view(-1, 4)[pos_masks],\n                l1_targets) / num_total_samples\n            loss_dict.update(loss_l1=loss_l1)\n\n        return loss_dict\n\n    @torch.no_grad()\n    def _get_target_single(self, cls_preds, objectness, priors, decoded_bboxes,\n                           gt_bboxes, gt_labels):\n        \"\"\"Compute classification, regression, and objectness targets for\n        priors in a single image.\n        Args:\n            cls_preds (Tensor): Classification predictions of one image,\n                a 2D-Tensor with shape [num_priors, num_classes]\n            objectness (Tensor): Objectness predictions of one image,\n                a 1D-Tensor with shape [num_priors]\n            priors (Tensor): All priors of one image, a 2D-Tensor with shape\n                [num_priors, 4] in [cx, xy, stride_w, stride_y] format.\n            decoded_bboxes (Tensor): Decoded bboxes predictions of one image,\n                a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,\n                br_x, br_y] format.\n            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor\n                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (Tensor): Ground truth labels of one image, a Tensor\n                with shape [num_gts].\n        \"\"\"\n\n        num_priors = priors.size(0)\n        num_gts = gt_labels.size(0)\n        gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype)\n        # No target\n        if num_gts == 0:\n            cls_target = cls_preds.new_zeros((0, self.num_classes))\n            bbox_target = cls_preds.new_zeros((0, 4))\n            l1_target = cls_preds.new_zeros((0, 4))\n            obj_target = cls_preds.new_zeros((num_priors, 1))\n            foreground_mask = cls_preds.new_zeros(num_priors).bool()\n            
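# With no GT boxes every prior is background, so return empty positive\n            # targets and an all-zero objectness target for this image.\n            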
return (foreground_mask, cls_target, obj_target, bbox_target,\n                    l1_target, 0)\n\n        # YOLOX uses center priors with 0.5 offset to assign targets,\n        # but use center priors without offset to regress bboxes.\n        offset_priors = torch.cat(\n            [priors[:, :2] + priors[:, 2:] * 0.5, priors[:, 2:]], dim=-1)\n\n        assign_result = self.assigner.assign(\n            cls_preds.sigmoid() * objectness.unsqueeze(1).sigmoid(),\n            offset_priors, decoded_bboxes, gt_bboxes, gt_labels)\n\n        sampling_result = self.sampler.sample(assign_result, priors, gt_bboxes)\n        pos_inds = sampling_result.pos_inds\n        num_pos_per_img = pos_inds.size(0)\n\n        pos_ious = assign_result.max_overlaps[pos_inds]\n        # IOU aware classification score\n        cls_target = F.one_hot(sampling_result.pos_gt_labels,\n                               self.num_classes) * pos_ious.unsqueeze(-1)\n        obj_target = torch.zeros_like(objectness).unsqueeze(-1)\n        obj_target[pos_inds] = 1\n        bbox_target = sampling_result.pos_gt_bboxes\n        l1_target = cls_preds.new_zeros((num_pos_per_img, 4))\n        if self.use_l1:\n            l1_target = self._get_l1_target(l1_target, bbox_target,\n                                            priors[pos_inds])\n        foreground_mask = torch.zeros_like(objectness).to(torch.bool)\n        foreground_mask[pos_inds] = 1\n        return (foreground_mask, cls_target, obj_target, bbox_target,\n                l1_target, num_pos_per_img)\n\n    def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8):\n        \"\"\"Convert gt bboxes to center offset and log width height.\"\"\"\n        gt_cxcywh = bbox_xyxy_to_cxcywh(gt_bboxes)\n        l1_target[:, :2] = (gt_cxcywh[:, :2] - priors[:, :2]) / priors[:, 2:]\n        l1_target[:, 2:] = torch.log(gt_cxcywh[:, 2:] / priors[:, 2:] + eps)\n        return l1_target\n"
  },
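The `_get_l1_target` helper above encodes a ground-truth box as a stride-normalized center offset plus log-scaled width/height, and `_bbox_decode` (referenced in the loss) applies the inverse mapping. The standalone sketch below is not part of mmdet: the helper names `encode_l1_target`/`decode_bbox` are illustrative, and it assumes the standard YOLOX decode (`xy = pred_xy * stride + prior_xy`, `wh = exp(pred_wh) * stride`).

```python
import torch

def encode_l1_target(gt_xyxy, priors, eps=1e-8):
    """Illustrative re-implementation of the encoding in _get_l1_target."""
    gt_cxcy = (gt_xyxy[:, :2] + gt_xyxy[:, 2:]) / 2
    gt_wh = gt_xyxy[:, 2:] - gt_xyxy[:, :2]
    target = torch.empty_like(gt_xyxy)
    target[:, :2] = (gt_cxcy - priors[:, :2]) / priors[:, 2:]  # stride-normalized center offset
    target[:, 2:] = torch.log(gt_wh / priors[:, 2:] + eps)     # log-scaled width/height
    return target

def decode_bbox(priors, pred):
    """Assumed inverse (standard YOLOX decode): offsets scaled by stride, wh exponentiated."""
    xys = pred[:, :2] * priors[:, 2:] + priors[:, :2]
    whs = pred[:, 2:].exp() * priors[:, 2:]
    return torch.cat([xys - whs / 2, xys + whs / 2], dim=-1)

priors = torch.tensor([[16., 16., 8., 8.]])  # one prior: [cx, cy, stride_w, stride_h]
gt = torch.tensor([[10., 12., 42., 44.]])    # one 32x32 ground-truth box in xyxy
assert torch.allclose(decode_bbox(priors, encode_l1_target(gt, priors)), gt, atol=1e-4)
```

Round-tripping the box confirms the two mappings invert each other, which is why the L1 loss can be applied directly to the raw regression outputs when `use_l1` is enabled.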
  {
    "path": "mmdet/models/detectors/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .atss import ATSS\nfrom .autoassign import AutoAssign\nfrom .base import BaseDetector\nfrom .cascade_rcnn import CascadeRCNN\nfrom .centernet import CenterNet\nfrom .cornernet import CornerNet\nfrom .ddod import DDOD\nfrom .deformable_detr import DeformableDETR\nfrom .detr import DETR\nfrom .fast_rcnn import FastRCNN\nfrom .faster_rcnn import FasterRCNN\nfrom .fcos import FCOS\nfrom .fovea import FOVEA\nfrom .fsaf import FSAF\nfrom .gfl import GFL\nfrom .grid_rcnn import GridRCNN\nfrom .htc import HybridTaskCascade\nfrom .kd_one_stage import KnowledgeDistillationSingleStageDetector\nfrom .lad import LAD\nfrom .mask2former import Mask2Former\nfrom .mask_rcnn import MaskRCNN\nfrom .mask_scoring_rcnn import MaskScoringRCNN\nfrom .maskformer import MaskFormer\nfrom .nasfcos import NASFCOS\nfrom .paa import PAA\nfrom .panoptic_fpn import PanopticFPN\nfrom .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor\nfrom .point_rend import PointRend\nfrom .queryinst import QueryInst\nfrom .reppoints_detector import RepPointsDetector\nfrom .retinanet import RetinaNet\nfrom .rpn import RPN\nfrom .scnet import SCNet\nfrom .single_stage import SingleStageDetector\nfrom .solo import SOLO\nfrom .solov2 import SOLOv2\nfrom .sparse_rcnn import SparseRCNN\nfrom .tood import TOOD\nfrom .trident_faster_rcnn import TridentFasterRCNN\nfrom .two_stage import TwoStageDetector\nfrom .vfnet import VFNet\nfrom .yolact import YOLACT\nfrom .yolo import YOLOV3\nfrom .yolof import YOLOF\nfrom .yolox import YOLOX\n\n__all__ = [\n    'ATSS', 'BaseDetector', 'SingleStageDetector', 'TwoStageDetector', 'RPN',\n    'KnowledgeDistillationSingleStageDetector', 'FastRCNN', 'FasterRCNN',\n    'MaskRCNN', 'CascadeRCNN', 'HybridTaskCascade', 'RetinaNet', 'FCOS',\n    'GridRCNN', 'MaskScoringRCNN', 'RepPointsDetector', 'FOVEA', 'FSAF',\n    'NASFCOS', 'PointRend', 'GFL', 'CornerNet', 'PAA', 'YOLOV3', 'YOLACT',\n    'VFNet', 'DETR', 'TridentFasterRCNN', 'SparseRCNN', 'SCNet', 'SOLO',\n    'SOLOv2', 'DeformableDETR', 'AutoAssign', 'YOLOF', 'CenterNet', 'YOLOX',\n    'TwoStagePanopticSegmentor', 'PanopticFPN', 'QueryInst', 'LAD', 'TOOD',\n    'MaskFormer', 'DDOD', 'Mask2Former'\n]\n"
  },
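Importing this package runs every `@DETECTORS.register_module()` decorator above, which is what lets a config reference a detector by its string name. A small hedged sketch of looking one up (assuming mmdet is installed and exports `DETECTORS` from `mmdet.models` as usual):

```python
# Importing mmdet.models triggers the imports above, registering each class
# with the DETECTORS registry so configs can use e.g. dict(type='ATSS', ...).
from mmdet.models import DETECTORS

atss_cls = DETECTORS.get('ATSS')  # look a detector class up by its registered name
print(atss_cls.__module__, atss_cls.__name__)  # mmdet.models.detectors.atss ATSS
```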
  {
    "path": "mmdet/models/detectors/atss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass ATSS(SingleStageDetector):\n    \"\"\"Implementation of `ATSS <https://arxiv.org/abs/1912.02424>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ATSS, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/autoassign.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass AutoAssign(SingleStageDetector):\n    \"\"\"Implementation of `AutoAssign: Differentiable Label Assignment for Dense\n    Object Detection <https://arxiv.org/abs/2007.03496>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None):\n        super(AutoAssign, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                         test_cfg, pretrained)\n"
  },
  {
    "path": "mmdet/models/detectors/base.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\nfrom collections import OrderedDict\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom mmdet.core.visualization import imshow_det_bboxes\n\n\nclass BaseDetector(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for detectors.\"\"\"\n\n    def __init__(self, init_cfg=None):\n        super(BaseDetector, self).__init__(init_cfg)\n        self.fp16_enabled = False\n\n    @property\n    def with_neck(self):\n        \"\"\"bool: whether the detector has a neck\"\"\"\n        return hasattr(self, 'neck') and self.neck is not None\n\n    # TODO: these properties need to be carefully handled\n    # for both single stage & two stage detectors\n    @property\n    def with_shared_head(self):\n        \"\"\"bool: whether the detector has a shared head in the RoI Head\"\"\"\n        return hasattr(self, 'roi_head') and self.roi_head.with_shared_head\n\n    @property\n    def with_bbox(self):\n        \"\"\"bool: whether the detector has a bbox head\"\"\"\n        return ((hasattr(self, 'roi_head') and self.roi_head.with_bbox)\n                or (hasattr(self, 'bbox_head') and self.bbox_head is not None))\n\n    @property\n    def with_mask(self):\n        \"\"\"bool: whether the detector has a mask head\"\"\"\n        return ((hasattr(self, 'roi_head') and self.roi_head.with_mask)\n                or (hasattr(self, 'mask_head') and self.mask_head is not None))\n\n    @abstractmethod\n    def extract_feat(self, imgs):\n        \"\"\"Extract features from images.\"\"\"\n        pass\n\n    def extract_feats(self, imgs):\n        \"\"\"Extract features from multiple images.\n\n        Args:\n            imgs (list[torch.Tensor]): A list of images. 
The images are\n                augmented from the same image but in different ways.\n\n        Returns:\n            list[torch.Tensor]: Features of different images\n        \"\"\"\n        assert isinstance(imgs, list)\n        return [self.extract_feat(img) for img in imgs]\n\n    def forward_train(self, imgs, img_metas, **kwargs):\n        \"\"\"\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys, see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            kwargs (keyword arguments): Specific to concrete implementation.\n        \"\"\"\n        # NOTE the batched image size information may be useful, e.g.\n        # in DETR, this is needed for the construction of masks, which is\n        # then used for the transformer_head.\n        batch_input_shape = tuple(imgs[0].size()[-2:])\n        for img_meta in img_metas:\n            img_meta['batch_input_shape'] = batch_input_shape\n\n    async def async_simple_test(self, img, img_metas, **kwargs):\n        raise NotImplementedError\n\n    @abstractmethod\n    def simple_test(self, img, img_metas, **kwargs):\n        pass\n\n    @abstractmethod\n    def aug_test(self, imgs, img_metas, **kwargs):\n        \"\"\"Test function with test time augmentation.\"\"\"\n        pass\n\n    async def aforward_test(self, *, img, img_metas, **kwargs):\n        for var, name in [(img, 'img'), (img_metas, 'img_metas')]:\n            if not isinstance(var, list):\n                raise TypeError(f'{name} must be a list, but got {type(var)}')\n\n        num_augs = len(img)\n        if num_augs != len(img_metas):\n            raise ValueError(f'num of augmentations ({len(img)}) '\n                             f'!= num of image metas ({len(img_metas)})')\n        # TODO: remove the restriction of samples_per_gpu == 1 when prepared\n        samples_per_gpu = img[0].size(0)\n        assert samples_per_gpu == 1\n\n        if num_augs == 1:\n            return await self.async_simple_test(img[0], img_metas[0], **kwargs)\n        else:\n            raise NotImplementedError\n\n    def forward_test(self, imgs, img_metas, **kwargs):\n        \"\"\"\n        Args:\n            imgs (List[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains all images in the batch.\n            img_metas (List[List[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) 
and the inner list indicates\n                images in a batch.\n        \"\"\"\n        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:\n            if not isinstance(var, list):\n                raise TypeError(f'{name} must be a list, but got {type(var)}')\n\n        num_augs = len(imgs)\n        if num_augs != len(img_metas):\n            raise ValueError(f'num of augmentations ({len(imgs)}) '\n                             f'!= num of image meta ({len(img_metas)})')\n\n        # NOTE the batched image size information may be useful, e.g.\n        # in DETR, this is needed for the construction of masks, which is\n        # then used for the transformer_head.\n        for img, img_meta in zip(imgs, img_metas):\n            batch_size = len(img_meta)\n            for img_id in range(batch_size):\n                img_meta[img_id]['batch_input_shape'] = tuple(img.size()[-2:])\n\n        if num_augs == 1:\n            # proposals (List[List[Tensor]]): the outer list indicates\n            # test-time augs (multiscale, flip, etc.) and the inner list\n            # indicates images in a batch.\n            # The Tensor should have a shape Px4, where P is the number of\n            # proposals.\n            if 'proposals' in kwargs:\n                kwargs['proposals'] = kwargs['proposals'][0]\n            return self.simple_test(imgs[0], img_metas[0], **kwargs)\n        else:\n            assert imgs[0].size(0) == 1, 'aug test does not support ' \\\n                                         'inference with batch size ' \\\n                                         f'{imgs[0].size(0)}'\n            # TODO: support test augmentation for predefined proposals\n            assert 'proposals' not in kwargs\n            return self.aug_test(imgs, img_metas, **kwargs)\n\n    @auto_fp16(apply_to=('img', ))\n    def forward(self, img, img_metas, return_loss=True, **kwargs):\n        \"\"\"Calls either :func:`forward_train` or :func:`forward_test` depending\n        on whether ``return_loss`` is ``True``.\n\n        Note this setting will change the expected inputs. When\n        ``return_loss=True``, img and img_meta are single-nested (i.e. Tensor\n        and List[dict]), and when ``resturn_loss=False``, img and img_meta\n        should be double nested (i.e.  
List[Tensor], List[List[dict]]), with\n        the outer list indicating test time augmentations.\n        \"\"\"\n        if torch.onnx.is_in_onnx_export():\n            assert len(img_metas) == 1\n            return self.onnx_export(img[0], img_metas[0])\n\n        if return_loss:\n            return self.forward_train(img, img_metas, **kwargs)\n        else:\n            return self.forward_test(img, img_metas, **kwargs)\n\n    def _parse_losses(self, losses):\n        \"\"\"Parse the raw outputs (losses) of the network.\n\n        Args:\n            losses (dict): Raw output of the network, which usually contain\n                losses and other necessary information.\n\n        Returns:\n            tuple[Tensor, dict]: (loss, log_vars), loss is the loss tensor \\\n                which may be a weighted sum of all losses, log_vars contains \\\n                all the variables to be sent to the logger.\n        \"\"\"\n        log_vars = OrderedDict()\n        for loss_name, loss_value in losses.items():\n            if isinstance(loss_value, torch.Tensor):\n                log_vars[loss_name] = loss_value.mean()\n            elif isinstance(loss_value, list):\n                log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)\n            else:\n                raise TypeError(\n                    f'{loss_name} is not a tensor or list of tensors')\n\n        loss = sum(_value for _key, _value in log_vars.items()\n                   if 'loss' in _key)\n\n        # If the loss_vars has different length, GPUs will wait infinitely\n        if dist.is_available() and dist.is_initialized():\n            log_var_length = torch.tensor(len(log_vars), device=loss.device)\n            dist.all_reduce(log_var_length)\n            message = (f'rank {dist.get_rank()}' +\n                       f' len(log_vars): {len(log_vars)}' + ' keys: ' +\n                       ','.join(log_vars.keys()))\n            assert log_var_length == len(log_vars) * dist.get_world_size(), \\\n                'loss log variables are different across GPUs!\\n' + message\n\n        log_vars['loss'] = loss\n        for loss_name, loss_value in log_vars.items():\n            # reduce loss when distributed training\n            if dist.is_available() and dist.is_initialized():\n                loss_value = loss_value.data.clone()\n                dist.all_reduce(loss_value.div_(dist.get_world_size()))\n            log_vars[loss_name] = loss_value.item()\n\n        return loss, log_vars\n\n    def train_step(self, data, optimizer):\n        \"\"\"The iteration step during training.\n\n        This method defines an iteration step during training, except for the\n        back propagation and optimizer updating, which are done in an optimizer\n        hook. Note that in some complicated cases or models, the whole process\n        including back propagation and optimizer updating is also defined in\n        this method, such as GAN.\n\n        Args:\n            data (dict): The output of dataloader.\n            optimizer (:obj:`torch.optim.Optimizer` | dict): The optimizer of\n                runner is passed to ``train_step()``. 
This argument is unused\n                and reserved.\n\n        Returns:\n            dict: It should contain at least 3 keys: ``loss``, ``log_vars``, \\\n                ``num_samples``.\n\n                - ``loss`` is a tensor for back propagation, which can be a\n                  weighted sum of multiple losses.\n                - ``log_vars`` contains all the variables to be sent to the\n                  logger.\n                - ``num_samples`` indicates the batch size (when the model is\n                  DDP, it means the batch size on each GPU), which is used for\n                  averaging the logs.\n        \"\"\"\n        losses = self(**data)\n        loss, log_vars = self._parse_losses(losses)\n\n        outputs = dict(\n            loss=loss, log_vars=log_vars, num_samples=len(data['img_metas']))\n\n        return outputs\n\n    def val_step(self, data, optimizer=None):\n        \"\"\"The iteration step during validation.\n\n        This method shares the same signature as :func:`train_step`, but used\n        during val epochs. Note that the evaluation after training epochs is\n        not implemented with this method, but an evaluation hook.\n        \"\"\"\n        losses = self(**data)\n        loss, log_vars = self._parse_losses(losses)\n\n        log_vars_ = dict()\n        for loss_name, loss_value in log_vars.items():\n            k = loss_name + '_val'\n            log_vars_[k] = loss_value\n\n        outputs = dict(\n            loss=loss, log_vars=log_vars_, num_samples=len(data['img_metas']))\n\n        return outputs\n\n    def show_result(self,\n                    img,\n                    result,\n                    score_thr=0.3,\n                    bbox_color=(72, 101, 241),\n                    text_color=(72, 101, 241),\n                    mask_color=None,\n                    thickness=2,\n                    font_size=13,\n                    win_name='',\n                    show=False,\n                    wait_time=0,\n                    out_file=None):\n        \"\"\"Draw `result` over `img`.\n\n        Args:\n            img (str or Tensor): The image to be displayed.\n            result (Tensor or tuple): The results to draw over `img`\n                bbox_result or (bbox_result, segm_result).\n            score_thr (float, optional): Minimum score of bboxes to be shown.\n                Default: 0.3.\n            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n               The tuple of color should be in BGR order. Default: 'green'\n            text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n               The tuple of color should be in BGR order. Default: 'green'\n            mask_color (None or str or tuple(int) or :obj:`Color`):\n               Color of masks. The tuple of color should be in BGR order.\n               Default: None\n            thickness (int): Thickness of lines. Default: 2\n            font_size (int): Font size of texts. Default: 13\n            win_name (str): The window name. 
Default: ''\n            wait_time (float): Value of waitKey param.\n                Default: 0.\n            show (bool): Whether to show the image.\n                Default: False.\n            out_file (str or None): The filename to write the image.\n                Default: None.\n\n        Returns:\n            img (Tensor): Only if not `show` or `out_file`\n        \"\"\"\n        img = mmcv.imread(img)\n        img = img.copy()\n        if isinstance(result, tuple):\n            bbox_result, segm_result = result\n            if isinstance(segm_result, tuple):\n                segm_result = segm_result[0]  # ms rcnn\n        else:\n            bbox_result, segm_result = result, None\n        bboxes = np.vstack(bbox_result)\n        labels = [\n            np.full(bbox.shape[0], i, dtype=np.int32)\n            for i, bbox in enumerate(bbox_result)\n        ]\n        labels = np.concatenate(labels)\n        # draw segmentation masks\n        segms = None\n        if segm_result is not None and len(labels) > 0:  # non empty\n            segms = mmcv.concat_list(segm_result)\n            if isinstance(segms[0], torch.Tensor):\n                segms = torch.stack(segms, dim=0).detach().cpu().numpy()\n            else:\n                segms = np.stack(segms, axis=0)\n        # if out_file specified, do not show image in window\n        if out_file is not None:\n            show = False\n        # draw bounding boxes\n        img = imshow_det_bboxes(\n            img,\n            bboxes,\n            labels,\n            segms,\n            class_names=self.CLASSES,\n            score_thr=score_thr,\n            bbox_color=bbox_color,\n            text_color=text_color,\n            mask_color=mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=show,\n            wait_time=wait_time,\n            out_file=out_file)\n\n        if not (show or out_file):\n            return img\n\n    def onnx_export(self, img, img_metas):\n        raise NotImplementedError(f'{self.__class__.__name__} does '\n                                  f'not support ONNX EXPORT')\n"
  },
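`_parse_losses` and `train_step` above define the contract between a detector and the runner: the raw per-branch losses returned by `self(**data)` are mean-reduced, only keys containing `'loss'` are summed into the scalar used for back-propagation, and everything is exposed as Python scalars for the logger. A minimal single-GPU sketch of that behavior (no distributed reduction; not the mmdet API, the function name is illustrative):

```python
from collections import OrderedDict

import torch

def parse_losses_single_gpu(losses):
    """Mirror of the non-distributed path of BaseDetector._parse_losses."""
    log_vars = OrderedDict()
    for name, value in losses.items():
        log_vars[name] = value.mean() if isinstance(value, torch.Tensor) \
            else sum(v.mean() for v in value)
    # Only keys containing 'loss' contribute to the back-propagated total.
    loss = sum(v for k, v in log_vars.items() if 'loss' in k)
    log_vars['loss'] = loss
    return loss, {k: v.item() for k, v in log_vars.items()}

raw = dict(
    loss_cls=torch.tensor([0.8, 1.2]),                 # tensor losses are mean-reduced
    loss_bbox=[torch.tensor(0.5), torch.tensor(0.3)],  # lists of tensors are summed after mean
    acc=torch.tensor(91.0))                            # logged, but excluded from the total
total, log_vars = parse_losses_single_gpu(raw)
print(total)     # tensor(1.8000)
print(log_vars)  # {'loss_cls': 1.0, 'loss_bbox': 0.8..., 'acc': 91.0, 'loss': 1.8...}
```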
  {
    "path": "mmdet/models/detectors/cascade_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass CascadeRCNN(TwoStageDetector):\n    r\"\"\"Implementation of `Cascade R-CNN: Delving into High Quality Object\n    Detection <https://arxiv.org/abs/1906.09756>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 rpn_head=None,\n                 roi_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(CascadeRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n\n    def show_result(self, data, result, **kwargs):\n        \"\"\"Show prediction results of the detector.\n\n        Args:\n            data (str or np.ndarray): Image filename or loaded image.\n            result (Tensor or tuple): The results to draw over `img`\n                bbox_result or (bbox_result, segm_result).\n\n        Returns:\n            np.ndarray: The image with bboxes drawn on it.\n        \"\"\"\n        if self.with_mask:\n            ms_bbox_result, ms_segm_result = result\n            if isinstance(ms_bbox_result, dict):\n                result = (ms_bbox_result['ensemble'],\n                          ms_segm_result['ensemble'])\n        else:\n            if isinstance(result, dict):\n                result = result['ensemble']\n        return super(CascadeRCNN, self).show_result(data, result, **kwargs)\n"
  },
  {
    "path": "mmdet/models/detectors/centernet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import bbox2result\nfrom mmdet.models.builder import DETECTORS\nfrom ...core.utils import flip_tensor\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass CenterNet(SingleStageDetector):\n    \"\"\"Implementation of CenterNet(Objects as Points)\n\n    <https://arxiv.org/abs/1904.07850>.\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(CenterNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                        test_cfg, pretrained, init_cfg)\n\n    def merge_aug_results(self, aug_results, with_nms):\n        \"\"\"Merge augmented detection bboxes and score.\n\n        Args:\n            aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each\n                image.\n            with_nms (bool): If True, do nms before return boxes.\n\n        Returns:\n            tuple: (out_bboxes, out_labels)\n        \"\"\"\n        recovered_bboxes, aug_labels = [], []\n        for single_result in aug_results:\n            recovered_bboxes.append(single_result[0][0])\n            aug_labels.append(single_result[0][1])\n\n        bboxes = torch.cat(recovered_bboxes, dim=0).contiguous()\n        labels = torch.cat(aug_labels).contiguous()\n        if with_nms:\n            out_bboxes, out_labels = self.bbox_head._bboxes_nms(\n                bboxes, labels, self.bbox_head.test_cfg)\n        else:\n            out_bboxes, out_labels = bboxes, labels\n\n        return out_bboxes, out_labels\n\n    def aug_test(self, imgs, img_metas, rescale=True):\n        \"\"\"Augment testing of CenterNet. Aug test must have flipped image pair,\n        and unlike CornerNet, it will perform an averaging operation on the\n        feature map instead of detecting bbox.\n\n        Args:\n            imgs (list[Tensor]): Augmented images.\n            img_metas (list[list[dict]]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: True.\n\n        Note:\n            ``imgs`` must including flipped image pairs.\n\n        Returns:\n            list[list[np.ndarray]]: BBox results of each image and classes.\n                The outer list corresponds to each image. 
The inner list\n                corresponds to each class.\n        \"\"\"\n        img_inds = list(range(len(imgs)))\n        assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (\n            'aug test must have flipped image pair')\n        aug_results = []\n        for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):\n            flip_direction = img_metas[flip_ind][0]['flip_direction']\n            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])\n            x = self.extract_feat(img_pair)\n            center_heatmap_preds, wh_preds, offset_preds = self.bbox_head(x)\n            assert len(center_heatmap_preds) == len(wh_preds) == len(\n                offset_preds) == 1\n\n            # Feature map averaging\n            center_heatmap_preds[0] = (\n                center_heatmap_preds[0][0:1] +\n                flip_tensor(center_heatmap_preds[0][1:2], flip_direction)) / 2\n            wh_preds[0] = (wh_preds[0][0:1] +\n                           flip_tensor(wh_preds[0][1:2], flip_direction)) / 2\n\n            bbox_list = self.bbox_head.get_bboxes(\n                center_heatmap_preds,\n                wh_preds, [offset_preds[0][0:1]],\n                img_metas[ind],\n                rescale=rescale,\n                with_nms=False)\n            aug_results.append(bbox_list)\n\n        nms_cfg = self.bbox_head.test_cfg.get('nms_cfg', None)\n        if nms_cfg is None:\n            with_nms = False\n        else:\n            with_nms = True\n        bbox_list = [self.merge_aug_results(aug_results, with_nms)]\n        bbox_results = [\n            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n            for det_bboxes, det_labels in bbox_list\n        ]\n        return bbox_results\n"
  },
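CenterNet's `aug_test` merges a flipped pair by averaging feature maps rather than merging two sets of boxes: the prediction from the flipped image is flipped back (`flip_tensor` in mmdet) and averaged with the original. A standalone toy illustration of that averaging step:

```python
import torch

# Toy heatmaps for an (original, horizontally flipped) image pair.
heat_orig = torch.rand(1, 80, 128, 128)  # [N, num_classes, H, W]
heat_flip = heat_orig.flip(dims=[3])     # what a perfect model would predict on the flipped input

# Flip the flipped prediction back and average, as aug_test does for the center
# heatmap and wh maps (offset_preds are taken from the original image only).
merged = (heat_orig + heat_flip.flip(dims=[3])) / 2
assert torch.allclose(merged, heat_orig)  # perfect predictions leave the map unchanged
```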
  {
    "path": "mmdet/models/detectors/cornernet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import bbox2result, bbox_mapping_back\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass CornerNet(SingleStageDetector):\n    \"\"\"CornerNet.\n\n    This detector is the implementation of the paper `CornerNet: Detecting\n    Objects as Paired Keypoints <https://arxiv.org/abs/1808.01244>`_ .\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(CornerNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                        test_cfg, pretrained, init_cfg)\n\n    def merge_aug_results(self, aug_results, img_metas):\n        \"\"\"Merge augmented detection bboxes and score.\n\n        Args:\n            aug_results (list[list[Tensor]]): Det_bboxes and det_labels of each\n                image.\n            img_metas (list[list[dict]]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n\n        Returns:\n            tuple: (bboxes, labels)\n        \"\"\"\n        recovered_bboxes, aug_labels = [], []\n        for bboxes_labels, img_info in zip(aug_results, img_metas):\n            img_shape = img_info[0]['img_shape']  # using shape before padding\n            scale_factor = img_info[0]['scale_factor']\n            flip = img_info[0]['flip']\n            bboxes, labels = bboxes_labels\n            bboxes, scores = bboxes[:, :4], bboxes[:, -1:]\n            bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip)\n            recovered_bboxes.append(torch.cat([bboxes, scores], dim=-1))\n            aug_labels.append(labels)\n\n        bboxes = torch.cat(recovered_bboxes, dim=0)\n        labels = torch.cat(aug_labels)\n\n        if bboxes.shape[0] > 0:\n            out_bboxes, out_labels = self.bbox_head._bboxes_nms(\n                bboxes, labels, self.bbox_head.test_cfg)\n        else:\n            out_bboxes, out_labels = bboxes, labels\n\n        return out_bboxes, out_labels\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Augment testing of CornerNet.\n\n        Args:\n            imgs (list[Tensor]): Augmented images.\n            img_metas (list[list[dict]]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n\n        Note:\n            ``imgs`` must including flipped image pairs.\n\n        Returns:\n            list[list[np.ndarray]]: BBox results of each image and classes.\n                The outer list corresponds to each image. 
The inner list\n                corresponds to each class.\n        \"\"\"\n        img_inds = list(range(len(imgs)))\n\n        assert img_metas[0][0]['flip'] + img_metas[1][0]['flip'], (\n            'aug test must have flipped image pair')\n        aug_results = []\n        for ind, flip_ind in zip(img_inds[0::2], img_inds[1::2]):\n            img_pair = torch.cat([imgs[ind], imgs[flip_ind]])\n            x = self.extract_feat(img_pair)\n            outs = self.bbox_head(x)\n            bbox_list = self.bbox_head.get_bboxes(\n                *outs, [img_metas[ind], img_metas[flip_ind]], False, False)\n            aug_results.append(bbox_list[0])\n            aug_results.append(bbox_list[1])\n\n        bboxes, labels = self.merge_aug_results(aug_results, img_metas)\n        bbox_results = bbox2result(bboxes, labels, self.bbox_head.num_classes)\n\n        return [bbox_results]\n"
  },
  {
    "path": "mmdet/models/detectors/ddod.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass DDOD(SingleStageDetector):\n    \"\"\"Implementation of `DDOD <https://arxiv.org/pdf/2107.02963.pdf>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(DDOD, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/deformable_detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .detr import DETR\n\n\n@DETECTORS.register_module()\nclass DeformableDETR(DETR):\n\n    def __init__(self, *args, **kwargs):\n        super(DETR, self).__init__(*args, **kwargs)\n"
  },
  {
    "path": "mmdet/models/detectors/detr.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\n\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass DETR(SingleStageDetector):\n    r\"\"\"Implementation of `DETR: End-to-End Object Detection with\n    Transformers <https://arxiv.org/pdf/2005.12872>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n\n    # over-write `forward_dummy` because:\n    # the forward of bbox_head requires img_metas\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        warnings.warn('Warning! MultiheadAttention in DETR does not '\n                      'support flops computation! Do not use the '\n                      'results in your papers!')\n\n        batch_size, _, height, width = img.shape\n        dummy_img_metas = [\n            dict(\n                batch_input_shape=(height, width),\n                img_shape=(height, width, 3)) for _ in range(batch_size)\n        ]\n        x = self.extract_feat(img)\n        outs = self.bbox_head(x, dummy_img_metas)\n        return outs\n\n    # over-write `onnx_export` because:\n    # (1) the forward of bbox_head requires img_metas\n    # (2) the different behavior (e.g. construction of `masks`) between\n    # torch and ONNX model, during the forward of bbox_head\n    def onnx_export(self, img, img_metas):\n        \"\"\"Test function for exporting to ONNX, without test time augmentation.\n\n        Args:\n            img (torch.Tensor): input images.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n                and class labels of shape [N, num_det].\n        \"\"\"\n        x = self.extract_feat(img)\n        # forward of this head requires img_metas\n        outs = self.bbox_head.forward_onnx(x, img_metas)\n        # get shape as tensor\n        img_shape = torch._shape_as_tensor(img)[2:]\n        img_metas[0]['img_shape_for_onnx'] = img_shape\n\n        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)\n\n        return det_bboxes, det_labels\n"
  },
  {
    "path": "mmdet/models/detectors/fast_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass FastRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(FastRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n\n    def forward_test(self, imgs, img_metas, proposals, **kwargs):\n        \"\"\"\n        Args:\n            imgs (List[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains all images in the batch.\n            img_metas (List[List[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch.\n            proposals (List[List[Tensor]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. The Tensor should have a shape Px4, where\n                P is the number of proposals.\n        \"\"\"\n        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:\n            if not isinstance(var, list):\n                raise TypeError(f'{name} must be a list, but got {type(var)}')\n\n        num_augs = len(imgs)\n        if num_augs != len(img_metas):\n            raise ValueError(f'num of augmentations ({len(imgs)}) '\n                             f'!= num of image meta ({len(img_metas)})')\n\n        if num_augs == 1:\n            return self.simple_test(imgs[0], img_metas[0], proposals[0],\n                                    **kwargs)\n        else:\n            # TODO: support test-time augmentation\n            assert NotImplementedError\n"
  },
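FastRCNN's `forward_test` expects precomputed proposals with the same outer nesting as `imgs` and `img_metas`: the outer list indexes test-time augmentations, the inner entries index images in the batch. A hedged, shape-only sketch of the single-augmentation case (following the Px4 shape stated in the docstring; no model is built or run here):

```python
import torch

# One test-time augmentation (the only supported case), with a batch of one image.
imgs = [torch.zeros(1, 3, 800, 1333)]                 # outer list: augs; tensor: N x C x H x W
img_metas = [[dict(img_shape=(800, 1333, 3), scale_factor=1.0, flip=False)]]
proposals = [[torch.tensor([[0., 0., 100., 100.]])]]  # per aug, per image: P x 4 boxes

# With num_augs == 1, forward_test would dispatch to
# detector.simple_test(imgs[0], img_metas[0], proposals[0]).
```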
  {
    "path": "mmdet/models/detectors/faster_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass FasterRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(FasterRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/fcos.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass FCOS(SingleStageDetector):\n    \"\"\"Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/fovea.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass FOVEA(SingleStageDetector):\n    \"\"\"Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                    test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/fsaf.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass FSAF(SingleStageDetector):\n    \"\"\"Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/gfl.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass GFL(SingleStageDetector):\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                  test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/grid_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass GridRCNN(TwoStageDetector):\n    \"\"\"Grid R-CNN.\n\n    This detector is the implementation of:\n    - Grid R-CNN (https://arxiv.org/abs/1811.12030)\n    - Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(GridRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/htc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .cascade_rcnn import CascadeRCNN\n\n\n@DETECTORS.register_module()\nclass HybridTaskCascade(CascadeRCNN):\n    \"\"\"Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_\"\"\"\n\n    def __init__(self, **kwargs):\n        super(HybridTaskCascade, self).__init__(**kwargs)\n\n    @property\n    def with_semantic(self):\n        \"\"\"bool: whether the detector has a semantic head\"\"\"\n        return self.roi_head.with_semantic\n"
  },
  {
    "path": "mmdet/models/detectors/kd_one_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom pathlib import Path\n\nimport mmcv\nimport torch\nfrom mmcv.runner import load_checkpoint\n\nfrom .. import build_detector\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass KnowledgeDistillationSingleStageDetector(SingleStageDetector):\n    r\"\"\"Implementation of `Distilling the Knowledge in a Neural Network.\n    <https://arxiv.org/abs/1503.02531>`_.\n\n    Args:\n        teacher_config (str | dict): Config file path\n            or the config object of teacher model.\n        teacher_ckpt (str, optional): Checkpoint path of teacher model.\n            If left as None, the model will not load any weights.\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 teacher_config,\n                 teacher_ckpt=None,\n                 eval_teacher=True,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None):\n        super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,\n                         pretrained)\n        self.eval_teacher = eval_teacher\n        # Build teacher model\n        if isinstance(teacher_config, (str, Path)):\n            teacher_config = mmcv.Config.fromfile(teacher_config)\n        self.teacher_model = build_detector(teacher_config['model'])\n        if teacher_ckpt is not None:\n            load_checkpoint(\n                self.teacher_model, teacher_ckpt, map_location='cpu')\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (N, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss.\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        x = self.extract_feat(img)\n        with torch.no_grad():\n            teacher_x = self.teacher_model.extract_feat(img)\n            out_teacher = self.teacher_model.bbox_head(teacher_x)\n        losses = self.bbox_head.forward_train(x, out_teacher, img_metas,\n                                              gt_bboxes, gt_labels,\n                                              gt_bboxes_ignore)\n        return losses\n\n    def cuda(self, device=None):\n        \"\"\"Since teacher_model is registered as a plain object, it is necessary\n        to put the teacher model to cuda when calling cuda function.\"\"\"\n        self.teacher_model.cuda(device=device)\n        return super().cuda(device=device)\n\n    def train(self, mode=True):\n        \"\"\"Set 
the same train mode for teacher and student models.\"\"\"\n        if self.eval_teacher:\n            self.teacher_model.train(False)\n        else:\n            self.teacher_model.train(mode)\n        super().train(mode)\n\n    def __setattr__(self, name, value):\n        \"\"\"Set attribute, i.e. self.name = value\n\n        This override prevents the teacher model from being registered as an\n        nn.Module. The teacher module is registered as a plain object, so that\n        the teacher parameters will not show up when calling\n        ``self.parameters``, ``self.modules``, ``self.children`` methods.\n        \"\"\"\n        if name == 'teacher_model':\n            object.__setattr__(self, name, value)\n        else:\n            super().__setattr__(name, value)\n"
  },
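The `__setattr__` override above is what keeps the frozen teacher out of the optimizer: assigning it through `object.__setattr__` bypasses `nn.Module`'s submodule registration. A self-contained sketch of the effect, using toy modules rather than the mmdet classes:

```python
import torch.nn as nn

class Student(nn.Module):
    def __init__(self):
        super().__init__()
        self.head = nn.Linear(4, 2)  # registered normally: 4*2 + 2 = 10 parameters
        # Bypass nn.Module.__setattr__, so the teacher is stored as a plain attribute.
        object.__setattr__(self, 'teacher_model', nn.Linear(4, 2))

s = Student()
print(sum(p.numel() for p in s.parameters()))  # 10 -> only the student's own head
print(hasattr(s, 'teacher_model'))             # True, the teacher is still accessible
```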
  {
    "path": "mmdet/models/detectors/lad.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import load_checkpoint\n\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .kd_one_stage import KnowledgeDistillationSingleStageDetector\n\n\n@DETECTORS.register_module()\nclass LAD(KnowledgeDistillationSingleStageDetector):\n    \"\"\"Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 teacher_backbone,\n                 teacher_neck,\n                 teacher_bbox_head,\n                 teacher_ckpt,\n                 eval_teacher=True,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None):\n        super(KnowledgeDistillationSingleStageDetector,\n              self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,\n                             pretrained)\n        self.eval_teacher = eval_teacher\n        self.teacher_model = nn.Module()\n        self.teacher_model.backbone = build_backbone(teacher_backbone)\n        if teacher_neck is not None:\n            self.teacher_model.neck = build_neck(teacher_neck)\n        teacher_bbox_head.update(train_cfg=train_cfg)\n        teacher_bbox_head.update(test_cfg=test_cfg)\n        self.teacher_model.bbox_head = build_head(teacher_bbox_head)\n        if teacher_ckpt is not None:\n            load_checkpoint(\n                self.teacher_model, teacher_ckpt, map_location='cpu')\n\n    @property\n    def with_teacher_neck(self):\n        \"\"\"bool: whether the detector has a teacher_neck\"\"\"\n        return hasattr(self.teacher_model, 'neck') and \\\n            self.teacher_model.neck is not None\n\n    def extract_teacher_feat(self, img):\n        \"\"\"Directly extract teacher features from the backbone+neck.\"\"\"\n        x = self.teacher_model.backbone(img)\n        if self.with_teacher_neck:\n            x = self.teacher_model.neck(x)\n        return x\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (N, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        # get label assignment from the teacher\n        with torch.no_grad():\n            x_teacher = self.extract_teacher_feat(img)\n            outs_teacher = self.teacher_model.bbox_head(x_teacher)\n            label_assignment_results = \\\n                
self.teacher_model.bbox_head.get_label_assignment(\n                    *outs_teacher, gt_bboxes, gt_labels, img_metas,\n                    gt_bboxes_ignore)\n\n        # the student uses the label assignment from the teacher to learn\n        x = self.extract_feat(img)\n        losses = self.bbox_head.forward_train(x, label_assignment_results,\n                                              img_metas, gt_bboxes, gt_labels,\n                                              gt_bboxes_ignore)\n        return losses\n"
  },
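LAD's `forward_train` above runs the teacher under `torch.no_grad()` only to decide the label assignment; the student then computes its own losses under that assignment. A heavily simplified toy sketch of this control flow (the `argmax` stand-in below is not how `get_label_assignment` actually assigns labels, it just plays its role):

```python
import torch
import torch.nn.functional as F

# Teacher pass: frozen, only used to decide which target each location gets.
with torch.no_grad():
    teacher_scores = torch.rand(4, 3)           # toy teacher preds: 4 priors, 3 classes
    assigned_labels = teacher_scores.argmax(1)  # toy stand-in for get_label_assignment()

# Student pass: its own predictions are penalized under the teacher's assignment.
student_scores = torch.rand(4, 3, requires_grad=True)
loss = F.cross_entropy(student_scores, assigned_labels)
loss.backward()  # gradients reach the student only
```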
  {
    "path": "mmdet/models/detectors/mask2former.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .maskformer import MaskFormer\n\n\n@DETECTORS.register_module()\nclass Mask2Former(MaskFormer):\n    r\"\"\"Implementation of `Masked-attention Mask\n    Transformer for Universal Image Segmentation\n    <https://arxiv.org/pdf/2112.01527>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 panoptic_head=None,\n                 panoptic_fusion_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None):\n        super().__init__(\n            backbone,\n            neck=neck,\n            panoptic_head=panoptic_head,\n            panoptic_fusion_head=panoptic_fusion_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/mask_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass MaskRCNN(TwoStageDetector):\n    \"\"\"Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(MaskRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/mask_scoring_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass MaskScoringRCNN(TwoStageDetector):\n    \"\"\"Mask Scoring RCNN.\n\n    https://arxiv.org/abs/1903.00241\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(MaskScoringRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/maskformer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport mmcv\nimport numpy as np\n\nfrom mmdet.core import INSTANCE_OFFSET, bbox2result\nfrom mmdet.core.visualization import imshow_det_bboxes\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass MaskFormer(SingleStageDetector):\n    r\"\"\"Implementation of `Per-Pixel Classification is\n    NOT All You Need for Semantic Segmentation\n    <https://arxiv.org/pdf/2107.06278>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 panoptic_head=None,\n                 panoptic_fusion_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None):\n        super(SingleStageDetector, self).__init__(init_cfg=init_cfg)\n        self.backbone = build_backbone(backbone)\n        if neck is not None:\n            self.neck = build_neck(neck)\n\n        panoptic_head_ = copy.deepcopy(panoptic_head)\n        panoptic_head_.update(train_cfg=train_cfg)\n        panoptic_head_.update(test_cfg=test_cfg)\n        self.panoptic_head = build_head(panoptic_head_)\n\n        panoptic_fusion_head_ = copy.deepcopy(panoptic_fusion_head)\n        panoptic_fusion_head_.update(test_cfg=test_cfg)\n        self.panoptic_fusion_head = build_head(panoptic_fusion_head_)\n\n        self.num_things_classes = self.panoptic_head.num_things_classes\n        self.num_stuff_classes = self.panoptic_head.num_stuff_classes\n        self.num_classes = self.panoptic_head.num_classes\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n        # BaseDetector.show_result default for instance segmentation\n        if self.num_stuff_classes > 0:\n            self.show_result = self._show_pan_result\n\n    def forward_dummy(self, img, img_metas):\n        \"\"\"Used for computing network flops. 
See\n        `mmdetection/tools/analysis_tools/get_flops.py`\n\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[Dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n        \"\"\"\n        super(SingleStageDetector, self).forward_train(img, img_metas)\n        x = self.extract_feat(img)\n        outs = self.panoptic_head(x, img_metas)\n        return outs\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_masks,\n                      gt_semantic_seg=None,\n                      gt_bboxes_ignore=None,\n                      **kargs):\n        \"\"\"\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[Dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box.\n            gt_masks (list[BitmapMasks]): true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n            gt_semantic_seg (list[tensor]): semantic segmentation mask for\n                images for panoptic segmentation.\n                Defaults to None for instance segmentation.\n            gt_bboxes_ignore (list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n                Defaults to None.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        # add batch_input_shape in img_metas\n        super(SingleStageDetector, self).forward_train(img, img_metas)\n        x = self.extract_feat(img)\n        losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,\n                                                  gt_labels, gt_masks,\n                                                  gt_semantic_seg,\n                                                  gt_bboxes_ignore)\n\n        return losses\n\n    def simple_test(self, imgs, img_metas, **kwargs):\n        \"\"\"Test without augmentation.\n\n        Args:\n            imgs (Tensor): A batch of images.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            list[dict[str, np.array | tuple[list]] | tuple[list]]:\n                Semantic segmentation results and panoptic segmentation \\\n                results of each image for panoptic segmentation, or formatted \\\n                bbox and mask results of each image for instance segmentation.\n\n            .. 
code-block:: none\n\n                [\n                    # panoptic segmentation\n                    {\n                        'pan_results': np.array, # shape = [h, w]\n                        'ins_results': tuple[list],\n                        # semantic segmentation results are not supported yet\n                        'sem_results': np.array\n                    },\n                    ...\n                ]\n\n            or\n\n            .. code-block:: none\n\n                [\n                    # instance segmentation\n                    (\n                        bboxes, # list[np.array]\n                        masks # list[list[np.array]]\n                    ),\n                    ...\n                ]\n        \"\"\"\n        feats = self.extract_feat(imgs)\n        mask_cls_results, mask_pred_results = self.panoptic_head.simple_test(\n            feats, img_metas, **kwargs)\n        results = self.panoptic_fusion_head.simple_test(\n            mask_cls_results, mask_pred_results, img_metas, **kwargs)\n        for i in range(len(results)):\n            if 'pan_results' in results[i]:\n                results[i]['pan_results'] = results[i]['pan_results'].detach(\n                ).cpu().numpy()\n\n            if 'ins_results' in results[i]:\n                labels_per_image, bboxes, mask_pred_binary = results[i][\n                    'ins_results']\n                bbox_results = bbox2result(bboxes, labels_per_image,\n                                           self.num_things_classes)\n                mask_results = [[] for _ in range(self.num_things_classes)]\n                for j, label in enumerate(labels_per_image):\n                    mask = mask_pred_binary[j].detach().cpu().numpy()\n                    mask_results[label].append(mask)\n                results[i]['ins_results'] = bbox_results, mask_results\n\n            assert 'sem_results' not in results[i], 'segmantic segmentation '\\\n                'results are not supported yet.'\n\n        if self.num_stuff_classes == 0:\n            results = [res['ins_results'] for res in results]\n\n        return results\n\n    def aug_test(self, imgs, img_metas, **kwargs):\n        raise NotImplementedError\n\n    def onnx_export(self, img, img_metas):\n        raise NotImplementedError\n\n    def _show_pan_result(self,\n                         img,\n                         result,\n                         score_thr=0.3,\n                         bbox_color=(72, 101, 241),\n                         text_color=(72, 101, 241),\n                         mask_color=None,\n                         thickness=2,\n                         font_size=13,\n                         win_name='',\n                         show=False,\n                         wait_time=0,\n                         out_file=None):\n        \"\"\"Draw `panoptic result` over `img`.\n\n        Args:\n            img (str or Tensor): The image to be displayed.\n            result (dict): The results.\n\n            score_thr (float, optional): Minimum score of bboxes to be shown.\n                Default: 0.3.\n            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n               The tuple of color should be in BGR order. Default: 'green'.\n            text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n               The tuple of color should be in BGR order. Default: 'green'.\n            mask_color (None or str or tuple(int) or :obj:`Color`):\n               Color of masks. 
The tuple of color should be in BGR order.\n               Default: None.\n            thickness (int): Thickness of lines. Default: 2.\n            font_size (int): Font size of texts. Default: 13.\n            win_name (str): The window name. Default: ''.\n            wait_time (float): Value of waitKey param.\n                Default: 0.\n            show (bool): Whether to show the image.\n                Default: False.\n            out_file (str or None): The filename to write the image.\n                Default: None.\n\n        Returns:\n            img (Tensor): Only if not `show` or `out_file`.\n        \"\"\"\n        img = mmcv.imread(img)\n        img = img.copy()\n        pan_results = result['pan_results']\n        # keep objects ahead\n        ids = np.unique(pan_results)[::-1]\n        legal_indices = ids != self.num_classes  # for VOID label\n        ids = ids[legal_indices]\n        labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n        segms = (pan_results[None] == ids[:, None, None])\n\n        # if out_file specified, do not show image in window\n        if out_file is not None:\n            show = False\n        # draw bounding boxes\n        img = imshow_det_bboxes(\n            img,\n            segms=segms,\n            labels=labels,\n            class_names=self.CLASSES,\n            bbox_color=bbox_color,\n            text_color=text_color,\n            mask_color=mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=show,\n            wait_time=wait_time,\n            out_file=out_file)\n\n        if not (show or out_file):\n            return img\n"
  },
  {
    "path": "mmdet/models/detectors/nasfcos.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass NASFCOS(SingleStageDetector):\n    \"\"\"NAS-FCOS: Fast Neural Architecture Search for Object Detection.\n\n    https://arxiv.org/abs/1906.0442\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                      test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/paa.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass PAA(SingleStageDetector):\n    \"\"\"Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                  test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/panoptic_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor\n\n\n@DETECTORS.register_module()\nclass PanopticFPN(TwoStagePanopticSegmentor):\n    r\"\"\"Implementation of `Panoptic feature pyramid\n    networks <https://arxiv.org/pdf/1901.02446>`_\"\"\"\n\n    def __init__(\n            self,\n            backbone,\n            neck=None,\n            rpn_head=None,\n            roi_head=None,\n            train_cfg=None,\n            test_cfg=None,\n            pretrained=None,\n            init_cfg=None,\n            # for panoptic segmentation\n            semantic_head=None,\n            panoptic_fusion_head=None):\n        super(PanopticFPN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg,\n            semantic_head=semantic_head,\n            panoptic_fusion_head=panoptic_fusion_head)\n"
  },
  {
    "path": "mmdet/models/detectors/panoptic_two_stage_segmentor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms\nfrom mmdet.core.visualization import imshow_det_bboxes\nfrom ..builder import DETECTORS, build_head\nfrom ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass TwoStagePanopticSegmentor(TwoStageDetector):\n    \"\"\"Base class of Two-stage Panoptic Segmentor.\n\n    As well as the components in TwoStageDetector, Panoptic Segmentor has extra\n    semantic_head and panoptic_fusion_head.\n    \"\"\"\n\n    def __init__(\n            self,\n            backbone,\n            neck=None,\n            rpn_head=None,\n            roi_head=None,\n            train_cfg=None,\n            test_cfg=None,\n            pretrained=None,\n            init_cfg=None,\n            # for panoptic segmentation\n            semantic_head=None,\n            panoptic_fusion_head=None):\n        super(TwoStagePanopticSegmentor,\n              self).__init__(backbone, neck, rpn_head, roi_head, train_cfg,\n                             test_cfg, pretrained, init_cfg)\n        if semantic_head is not None:\n            self.semantic_head = build_head(semantic_head)\n        if panoptic_fusion_head is not None:\n            panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None\n            panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()\n            panoptic_fusion_head_.update(test_cfg=panoptic_cfg)\n            self.panoptic_fusion_head = build_head(panoptic_fusion_head_)\n\n            self.num_things_classes = self.panoptic_fusion_head.\\\n                num_things_classes\n            self.num_stuff_classes = self.panoptic_fusion_head.\\\n                num_stuff_classes\n            self.num_classes = self.panoptic_fusion_head.num_classes\n\n    @property\n    def with_semantic_head(self):\n        return hasattr(self,\n                       'semantic_head') and self.semantic_head is not None\n\n    @property\n    def with_panoptic_fusion_head(self):\n        return hasattr(self, 'panoptic_fusion_heads') and \\\n               self.panoptic_fusion_head is not None\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/get_flops.py`\n        \"\"\"\n        raise NotImplementedError(\n            f'`forward_dummy` is not implemented in {self.__class__.__name__}')\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      gt_semantic_seg=None,\n                      proposals=None,\n                      **kwargs):\n        x = self.extract_feat(img)\n        losses = dict()\n\n        # RPN forward and loss\n        if self.with_rpn:\n            proposal_cfg = self.train_cfg.get('rpn_proposal',\n                                              self.test_cfg.rpn)\n            rpn_losses, proposal_list = self.rpn_head.forward_train(\n                x,\n                img_metas,\n                gt_bboxes,\n                gt_labels=None,\n                gt_bboxes_ignore=gt_bboxes_ignore,\n                proposal_cfg=proposal_cfg)\n            losses.update(rpn_losses)\n        else:\n            proposal_list = proposals\n\n        roi_losses = 
self.roi_head.forward_train(x, img_metas, proposal_list,\n                                                 gt_bboxes, gt_labels,\n                                                 gt_bboxes_ignore, gt_masks,\n                                                 **kwargs)\n        losses.update(roi_losses)\n\n        semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg)\n        losses.update(semantic_loss)\n\n        return losses\n\n    def simple_test_mask(self,\n                         x,\n                         img_metas,\n                         det_bboxes,\n                         det_labels,\n                         rescale=False):\n        \"\"\"Simple test for mask head without augmentation.\"\"\"\n        img_shapes = tuple(meta['ori_shape']\n                           for meta in img_metas) if rescale else tuple(\n                               meta['pad_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            masks = []\n            for img_shape in img_shapes:\n                out_shape = (0, self.roi_head.bbox_head.num_classes) \\\n                            + img_shape[:2]\n                masks.append(det_bboxes[0].new_zeros(out_shape))\n            mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28))\n            mask_results = dict(\n                masks=masks, mask_pred=mask_pred, mask_feats=None)\n            return mask_results\n\n        _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]\n        if rescale:\n            if not isinstance(scale_factors[0], float):\n                scale_factors = [\n                    det_bboxes[0].new_tensor(scale_factor)\n                    for scale_factor in scale_factors\n                ]\n            _bboxes = [\n                _bboxes[i] * scale_factors[i] for i in range(len(_bboxes))\n            ]\n\n        mask_rois = bbox2roi(_bboxes)\n        mask_results = self.roi_head._mask_forward(x, mask_rois)\n        mask_pred = mask_results['mask_pred']\n        # split batch mask prediction back to each image\n        num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]\n        mask_preds = mask_pred.split(num_mask_roi_per_img, 0)\n\n        # resize the mask_preds to (K, H, W)\n        masks = []\n        for i in range(len(_bboxes)):\n            det_bbox = det_bboxes[i][:, :4]\n            det_label = det_labels[i]\n\n            mask_pred = mask_preds[i].sigmoid()\n\n            box_inds = torch.arange(mask_pred.shape[0])\n            mask_pred = mask_pred[box_inds, det_label][:, None]\n\n            img_h, img_w, _ = img_shapes[i]\n            mask_pred, _ = _do_paste_mask(\n                mask_pred, det_bbox, img_h, img_w, skip_empty=False)\n            masks.append(mask_pred)\n\n        mask_results['masks'] = masks\n\n        return mask_results\n\n    def simple_test(self, img, img_metas, proposals=None, rescale=False):\n        \"\"\"Test without Augmentation.\"\"\"\n        x = self.extract_feat(img)\n\n        if proposals is None:\n            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n        else:\n            proposal_list = proposals\n\n        bboxes, scores = self.roi_head.simple_test_bboxes(\n            x, img_metas, proposal_list, None, rescale=rescale)\n\n        pan_cfg = self.test_cfg.panoptic\n        # class-wise predictions\n        det_bboxes = []\n        det_labels = []\n        for bboxe, score in 
zip(bboxes, scores):\n            det_bbox, det_label = multiclass_nms(bboxe, score,\n                                                 pan_cfg.score_thr,\n                                                 pan_cfg.nms,\n                                                 pan_cfg.max_per_img)\n            det_bboxes.append(det_bbox)\n            det_labels.append(det_label)\n\n        mask_results = self.simple_test_mask(\n            x, img_metas, det_bboxes, det_labels, rescale=rescale)\n        masks = mask_results['masks']\n\n        seg_preds = self.semantic_head.simple_test(x, img_metas, rescale)\n\n        results = []\n        for i in range(len(det_bboxes)):\n            pan_results = self.panoptic_fusion_head.simple_test(\n                det_bboxes[i], det_labels[i], masks[i], seg_preds[i])\n            pan_results = pan_results.int().detach().cpu().numpy()\n            result = dict(pan_results=pan_results)\n            results.append(result)\n        return results\n\n    def show_result(self,\n                    img,\n                    result,\n                    score_thr=0.3,\n                    bbox_color=(72, 101, 241),\n                    text_color=(72, 101, 241),\n                    mask_color=None,\n                    thickness=2,\n                    font_size=13,\n                    win_name='',\n                    show=False,\n                    wait_time=0,\n                    out_file=None):\n        \"\"\"Draw `result` over `img`.\n\n        Args:\n            img (str or Tensor): The image to be displayed.\n            result (dict): The results.\n\n            score_thr (float, optional): Minimum score of bboxes to be shown.\n                Default: 0.3.\n            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n               The tuple of color should be in BGR order. Default: 'green'.\n            text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n               The tuple of color should be in BGR order. Default: 'green'.\n            mask_color (None or str or tuple(int) or :obj:`Color`):\n               Color of masks. The tuple of color should be in BGR order.\n               Default: None.\n            thickness (int): Thickness of lines. Default: 2.\n            font_size (int): Font size of texts. Default: 13.\n            win_name (str): The window name. 
Default: ''.\n            wait_time (float): Value of waitKey param.\n                Default: 0.\n            show (bool): Whether to show the image.\n                Default: False.\n            out_file (str or None): The filename to write the image.\n                Default: None.\n\n        Returns:\n            img (Tensor): Only if not `show` or `out_file`.\n        \"\"\"\n        img = mmcv.imread(img)\n        img = img.copy()\n        pan_results = result['pan_results']\n        # keep objects ahead\n        ids = np.unique(pan_results)[::-1]\n        legal_indices = ids != self.num_classes  # for VOID label\n        ids = ids[legal_indices]\n        labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)\n        segms = (pan_results[None] == ids[:, None, None])\n\n        # if out_file specified, do not show image in window\n        if out_file is not None:\n            show = False\n        # draw bounding boxes\n        img = imshow_det_bboxes(\n            img,\n            segms=segms,\n            labels=labels,\n            class_names=self.CLASSES,\n            bbox_color=bbox_color,\n            text_color=text_color,\n            mask_color=mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=show,\n            wait_time=wait_time,\n            out_file=out_file)\n\n        if not (show or out_file):\n            return img\n"
  },
  {
    "path": "mmdet/models/detectors/point_rend.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass PointRend(TwoStageDetector):\n    \"\"\"PointRend: Image Segmentation as Rendering\n\n    This detector is the implementation of\n    `PointRend <https://arxiv.org/abs/1912.08193>`_.\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(PointRend, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/queryinst.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .sparse_rcnn import SparseRCNN\n\n\n@DETECTORS.register_module()\nclass QueryInst(SparseRCNN):\n    r\"\"\"Implementation of\n    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(QueryInst, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/reppoints_detector.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass RepPointsDetector(SingleStageDetector):\n    \"\"\"RepPoints: Point Set Representation for Object Detection.\n\n        This detector is the implementation of:\n        - RepPoints detector (https://arxiv.org/pdf/1904.11490)\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(RepPointsDetector,\n              self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,\n                             pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/retinanet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass RetinaNet(SingleStageDetector):\n    \"\"\"Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                        test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/rpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom inspect import signature\n\nimport mmcv\nimport torch\nfrom mmcv.image import tensor2imgs\n\nfrom mmdet.core import bbox_mapping\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\n@DETECTORS.register_module()\nclass RPN(BaseDetector):\n    \"\"\"Implementation of Region Proposal Network.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 rpn_head,\n                 train_cfg,\n                 test_cfg,\n                 pretrained=None,\n                 init_cfg=None):\n        super(RPN, self).__init__(init_cfg)\n        if pretrained:\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            backbone.pretrained = pretrained\n        self.backbone = build_backbone(backbone)\n        self.neck = build_neck(neck) if neck is not None else None\n        rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n        rpn_head.update(train_cfg=rpn_train_cfg)\n        rpn_head.update(test_cfg=test_cfg.rpn)\n        self.rpn_head = build_head(rpn_head)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def extract_feat(self, img):\n        \"\"\"Extract features.\n\n        Args:\n            img (torch.Tensor): Image tensor with shape (n, c, h ,w).\n\n        Returns:\n            list[torch.Tensor]: Multi-level features that may have\n                different resolutions.\n        \"\"\"\n        x = self.backbone(img)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def forward_dummy(self, img):\n        \"\"\"Dummy forward function.\"\"\"\n        x = self.extract_feat(img)\n        rpn_outs = self.rpn_head(x)\n        return rpn_outs\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes=None,\n                      gt_bboxes_ignore=None):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (N, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        if (isinstance(self.train_cfg.rpn, dict)\n                and self.train_cfg.rpn.get('debug', False)):\n            self.rpn_head.debug_imgs = tensor2imgs(img)\n\n        x = self.extract_feat(img)\n        losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,\n                                             gt_bboxes_ignore)\n        return losses\n\n    def simple_test(self, img, img_metas, rescale=False):\n        \"\"\"Test function without test time augmentation.\n\n        Args:\n            imgs (list[torch.Tensor]): List 
of multiple images\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[np.ndarray]: proposals\n        \"\"\"\n        x = self.extract_feat(img)\n        # get origin input shape to onnx dynamic input shape\n        if torch.onnx.is_in_onnx_export():\n            img_shape = torch._shape_as_tensor(img)[2:]\n            img_metas[0]['img_shape_for_onnx'] = img_shape\n        proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n        if rescale:\n            for proposals, meta in zip(proposal_list, img_metas):\n                proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])\n        if torch.onnx.is_in_onnx_export():\n            return proposal_list\n\n        return [proposal.cpu().numpy() for proposal in proposal_list]\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            imgs (list[torch.Tensor]): List of multiple images\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[np.ndarray]: proposals\n        \"\"\"\n        proposal_list = self.rpn_head.aug_test_rpn(\n            self.extract_feats(imgs), img_metas)\n        if not rescale:\n            for proposals, img_meta in zip(proposal_list, img_metas[0]):\n                img_shape = img_meta['img_shape']\n                scale_factor = img_meta['scale_factor']\n                flip = img_meta['flip']\n                flip_direction = img_meta['flip_direction']\n                proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,\n                                                scale_factor, flip,\n                                                flip_direction)\n        return [proposal.cpu().numpy() for proposal in proposal_list]\n\n    def show_result(self, data, result, top_k=20, **kwargs):\n        \"\"\"Show RPN proposals on the image.\n\n        Args:\n            data (str or np.ndarray): Image filename or loaded image.\n            result (Tensor or tuple): The results to draw over `img`\n                bbox_result or (bbox_result, segm_result).\n            top_k (int): Plot the first k bboxes only\n               if set positive. Default: 20\n\n        Returns:\n            np.ndarray: The image with bboxes drawn on it.\n        \"\"\"\n        if kwargs is not None:\n            kwargs['colors'] = 'green'\n            sig = signature(mmcv.imshow_bboxes)\n            for k in list(kwargs.keys()):\n                if k not in sig.parameters:\n                    kwargs.pop(k)\n        mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)\n"
  },
  {
    "path": "mmdet/models/detectors/scnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .cascade_rcnn import CascadeRCNN\n\n\n@DETECTORS.register_module()\nclass SCNet(CascadeRCNN):\n    \"\"\"Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_\"\"\"\n\n    def __init__(self, **kwargs):\n        super(SCNet, self).__init__(**kwargs)\n"
  },
  {
    "path": "mmdet/models/detectors/single_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\n\nfrom mmdet.core import bbox2result\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\n@DETECTORS.register_module()\nclass SingleStageDetector(BaseDetector):\n    \"\"\"Base class for single-stage detectors.\n\n    Single-stage detectors directly and densely predict bounding boxes on the\n    output features of the backbone+neck.\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 bbox_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(SingleStageDetector, self).__init__(init_cfg)\n        if pretrained:\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            backbone.pretrained = pretrained\n        self.backbone = build_backbone(backbone)\n        if neck is not None:\n            self.neck = build_neck(neck)\n        bbox_head.update(train_cfg=train_cfg)\n        bbox_head.update(test_cfg=test_cfg)\n        self.bbox_head = build_head(bbox_head)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def extract_feat(self, img):\n        \"\"\"Directly extract features from the backbone+neck.\"\"\"\n        x = self.backbone(img)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        x = self.extract_feat(img)\n        outs = self.bbox_head(x)\n        return outs\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (N, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        super(SingleStageDetector, self).forward_train(img, img_metas)\n        x = self.extract_feat(img)\n        losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,\n                                              gt_labels, gt_bboxes_ignore)\n        return losses\n\n    def simple_test(self, img, img_metas, rescale=False):\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            img (torch.Tensor): Images with shape (N, C, H, W).\n            img_metas (list[dict]): List of image 
information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[list[np.ndarray]]: BBox results of each image and classes.\n                The outer list corresponds to each image. The inner list\n                corresponds to each class.\n        \"\"\"\n        feat = self.extract_feat(img)\n        results_list = self.bbox_head.simple_test(\n            feat, img_metas, rescale=rescale)\n        bbox_results = [\n            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n            for det_bboxes, det_labels in results_list\n        ]\n        return bbox_results\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test function with test time augmentation.\n\n        Args:\n            imgs (list[Tensor]): the outer list indicates test-time\n                augmentations and inner Tensor should have a shape NxCxHxW,\n                which contains all images in the batch.\n            img_metas (list[list[dict]]): the outer list indicates test-time\n                augs (multiscale, flip, etc.) and the inner list indicates\n                images in a batch. each dict has image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[list[np.ndarray]]: BBox results of each image and classes.\n                The outer list corresponds to each image. The inner list\n                corresponds to each class.\n        \"\"\"\n        assert hasattr(self.bbox_head, 'aug_test'), \\\n            f'{self.bbox_head.__class__.__name__}' \\\n            ' does not support test-time augmentation'\n\n        feats = self.extract_feats(imgs)\n        results_list = self.bbox_head.aug_test(\n            feats, img_metas, rescale=rescale)\n        bbox_results = [\n            bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)\n            for det_bboxes, det_labels in results_list\n        ]\n        return bbox_results\n\n    def onnx_export(self, img, img_metas, with_nms=True):\n        \"\"\"Test function without test time augmentation.\n\n        Args:\n            img (torch.Tensor): input images.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n                and class labels of shape [N, num_det].\n        \"\"\"\n        x = self.extract_feat(img)\n        outs = self.bbox_head(x)\n        # get origin input shape to support onnx dynamic shape\n\n        # get shape as tensor\n        img_shape = torch._shape_as_tensor(img)[2:]\n        img_metas[0]['img_shape_for_onnx'] = img_shape\n        # get pad input shape to support onnx dynamic shape for exporting\n        # `CornerNet` and `CentripetalNet`, which 'pad_shape' is used\n        # for inference\n        img_metas[0]['pad_shape_for_onnx'] = img_shape\n\n        if len(outs) == 2:\n            # add dummy score_factor\n            outs = (*outs, None)\n        # TODO Can we change to `get_bboxes` when `onnx_export` fail\n        det_bboxes, det_labels = self.bbox_head.onnx_export(\n            *outs, img_metas, with_nms=with_nms)\n\n        return det_bboxes, det_labels\n"
  },
  {
    "path": "mmdet/models/detectors/single_stage_instance_seg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom mmdet.core.visualization.image import imshow_det_bboxes\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\nINF = 1e8\n\n\n@DETECTORS.register_module()\nclass SingleStageInstanceSegmentor(BaseDetector):\n    \"\"\"Base class for single-stage instance segmentors.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 bbox_head=None,\n                 mask_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n\n        if pretrained:\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            backbone.pretrained = pretrained\n        super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg)\n        self.backbone = build_backbone(backbone)\n        if neck is not None:\n            self.neck = build_neck(neck)\n        else:\n            self.neck = None\n        if bbox_head is not None:\n            bbox_head.update(train_cfg=copy.deepcopy(train_cfg))\n            bbox_head.update(test_cfg=copy.deepcopy(test_cfg))\n            self.bbox_head = build_head(bbox_head)\n        else:\n            self.bbox_head = None\n\n        assert mask_head, f'`mask_head` must ' \\\n                          f'be implemented in {self.__class__.__name__}'\n        mask_head.update(train_cfg=copy.deepcopy(train_cfg))\n        mask_head.update(test_cfg=copy.deepcopy(test_cfg))\n        self.mask_head = build_head(mask_head)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    def extract_feat(self, img):\n        \"\"\"Directly extract features from the backbone and neck.\"\"\"\n        x = self.backbone(img)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        raise NotImplementedError(\n            f'`forward_dummy` is not implemented in {self.__class__.__name__}')\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_masks,\n                      gt_labels,\n                      gt_bboxes=None,\n                      gt_bboxes_ignore=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (B, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation\n                masks for each box.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes (list[Tensor]): Each item is the truth boxes\n                of each image in [tl_x, tl_y, br_x, br_y] format.\n                Default: None.\n            gt_bboxes_ignore 
(list[Tensor] | None): Specify which bounding\n                boxes can be ignored when computing the loss.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n\n        gt_masks = [\n            gt_mask.to_tensor(dtype=torch.bool, device=img.device)\n            for gt_mask in gt_masks\n        ]\n        x = self.extract_feat(img)\n        losses = dict()\n\n        # CondInst and YOLACT have bbox_head\n        if self.bbox_head:\n            # bbox_head_preds is a tuple\n            bbox_head_preds = self.bbox_head(x)\n            # positive_infos is a list of obj:`InstanceData`\n            # It contains the information about the positive samples\n            # CondInst, YOLACT\n            det_losses, positive_infos = self.bbox_head.loss(\n                *bbox_head_preds,\n                gt_bboxes=gt_bboxes,\n                gt_labels=gt_labels,\n                gt_masks=gt_masks,\n                img_metas=img_metas,\n                gt_bboxes_ignore=gt_bboxes_ignore,\n                **kwargs)\n            losses.update(det_losses)\n        else:\n            positive_infos = None\n\n        mask_loss = self.mask_head.forward_train(\n            x,\n            gt_labels,\n            gt_masks,\n            img_metas,\n            positive_infos=positive_infos,\n            gt_bboxes=gt_bboxes,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            **kwargs)\n        # avoid loss override\n        assert not set(mask_loss.keys()) & set(losses.keys())\n\n        losses.update(mask_loss)\n        return losses\n\n    def simple_test(self, img, img_metas, rescale=False):\n        \"\"\"Test function without test-time augmentation.\n\n        Args:\n            img (torch.Tensor): Images with shape (B, C, H, W).\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list(tuple): Formatted bbox and mask results of multiple \\\n                images. The outer list corresponds to each image. \\\n                Each tuple contains two type of results of single image:\n\n                - bbox_results (list[np.ndarray]): BBox results of\n                  single image. The list corresponds to each class.\n                  each ndarray has a shape (N, 5), N is the number of\n                  bboxes with this category, and last dimension\n                  5 arrange as (x1, y1, x2, y2, scores).\n                - mask_results (list[np.ndarray]): Mask results of\n                  single image. 
The list corresponds to each class.\n                  each ndarray has a shape (N, img_h, img_w), N\n                  is the number of masks with this category.\n        \"\"\"\n        feat = self.extract_feat(img)\n        if self.bbox_head:\n            outs = self.bbox_head(feat)\n            # results_list is list[obj:`InstanceData`]\n            results_list = self.bbox_head.get_results(\n                *outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale)\n        else:\n            results_list = None\n\n        results_list = self.mask_head.simple_test(\n            feat, img_metas, rescale=rescale, instances_list=results_list)\n\n        format_results_list = []\n        for results in results_list:\n            format_results_list.append(self.format_results(results))\n\n        return format_results_list\n\n    def format_results(self, results):\n        \"\"\"Format the model predictions according to the interface with\n        dataset.\n\n        Args:\n            results (:obj:`InstanceData`): Processed\n                results of single images. Usually contains\n                following keys.\n\n                - scores (Tensor): Classification scores, has shape\n                  (num_instance,)\n                - labels (Tensor): Has shape (num_instances,).\n                - masks (Tensor): Processed mask results, has\n                  shape (num_instances, h, w).\n\n        Returns:\n            tuple: Formatted bbox and mask results.. It contains two items:\n\n                - bbox_results (list[np.ndarray]): BBox results of\n                  single image. The list corresponds to each class.\n                  each ndarray has a shape (N, 5), N is the number of\n                  bboxes with this category, and last dimension\n                  5 arrange as (x1, y1, x2, y2, scores).\n                - mask_results (list[np.ndarray]): Mask results of\n                  single image. 
The list corresponds to each class.\n                  each ndarray has shape (N, img_h, img_w), N\n                  is the number of masks with this category.\n        \"\"\"\n        data_keys = results.keys()\n        assert 'scores' in data_keys\n        assert 'labels' in data_keys\n\n        assert 'masks' in data_keys, \\\n            'results should contain ' \\\n            'masks when format the results '\n        mask_results = [[] for _ in range(self.mask_head.num_classes)]\n\n        num_masks = len(results)\n\n        if num_masks == 0:\n            bbox_results = [\n                np.zeros((0, 5), dtype=np.float32)\n                for _ in range(self.mask_head.num_classes)\n            ]\n            return bbox_results, mask_results\n\n        labels = results.labels.detach().cpu().numpy()\n\n        if 'bboxes' not in results:\n            # create dummy bbox results to store the scores\n            results.bboxes = results.scores.new_zeros(len(results), 4)\n\n        det_bboxes = torch.cat([results.bboxes, results.scores[:, None]],\n                               dim=-1)\n        det_bboxes = det_bboxes.detach().cpu().numpy()\n        bbox_results = [\n            det_bboxes[labels == i, :]\n            for i in range(self.mask_head.num_classes)\n        ]\n\n        masks = results.masks.detach().cpu().numpy()\n\n        for idx in range(num_masks):\n            mask = masks[idx]\n            mask_results[labels[idx]].append(mask)\n\n        return bbox_results, mask_results\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        raise NotImplementedError\n\n    def show_result(self,\n                    img,\n                    result,\n                    score_thr=0.3,\n                    bbox_color=(72, 101, 241),\n                    text_color=(72, 101, 241),\n                    mask_color=None,\n                    thickness=2,\n                    font_size=13,\n                    win_name='',\n                    show=False,\n                    wait_time=0,\n                    out_file=None):\n        \"\"\"Draw `result` over `img`.\n\n        Args:\n            img (str or Tensor): The image to be displayed.\n            result (tuple): Format bbox and mask results.\n                It contains two items:\n\n                - bbox_results (list[np.ndarray]): BBox results of\n                  single image. The list corresponds to each class.\n                  each ndarray has a shape (N, 5), N is the number of\n                  bboxes with this category, and last dimension\n                  5 arrange as (x1, y1, x2, y2, scores).\n                - mask_results (list[np.ndarray]): Mask results of\n                  single image. The list corresponds to each class.\n                  each ndarray has shape (N, img_h, img_w), N\n                  is the number of masks with this category.\n\n            score_thr (float, optional): Minimum score of bboxes to be shown.\n                Default: 0.3.\n            bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.\n               The tuple of color should be in BGR order. Default: 'green'\n            text_color (str or tuple(int) or :obj:`Color`):Color of texts.\n               The tuple of color should be in BGR order. Default: 'green'\n            mask_color (None or str or tuple(int) or :obj:`Color`):\n               Color of masks. The tuple of color should be in BGR order.\n               Default: None\n            thickness (int): Thickness of lines. 
Default: 2\n            font_size (int): Font size of texts. Default: 13\n            win_name (str): The window name. Default: ''\n            wait_time (float): Value of waitKey param.\n                Default: 0.\n            show (bool): Whether to show the image.\n                Default: False.\n            out_file (str or None): The filename to write the image.\n                Default: None.\n\n        Returns:\n            img (Tensor): Only if not `show` or `out_file`\n        \"\"\"\n\n        assert isinstance(result, tuple)\n        bbox_result, mask_result = result\n        bboxes = np.vstack(bbox_result)\n        img = mmcv.imread(img)\n        img = img.copy()\n        labels = [\n            np.full(bbox.shape[0], i, dtype=np.int32)\n            for i, bbox in enumerate(bbox_result)\n        ]\n        labels = np.concatenate(labels)\n        if len(labels) == 0:\n            bboxes = np.zeros([0, 5])\n            masks = np.zeros([0, 0, 0])\n        # draw segmentation masks\n        else:\n            masks = mmcv.concat_list(mask_result)\n\n            if isinstance(masks[0], torch.Tensor):\n                masks = torch.stack(masks, dim=0).detach().cpu().numpy()\n            else:\n                masks = np.stack(masks, axis=0)\n            # dummy bboxes\n            if bboxes[:, :4].sum() == 0:\n                num_masks = len(bboxes)\n                x_any = masks.any(axis=1)\n                y_any = masks.any(axis=2)\n                for idx in range(num_masks):\n                    x = np.where(x_any[idx, :])[0]\n                    y = np.where(y_any[idx, :])[0]\n                    if len(x) > 0 and len(y) > 0:\n                        bboxes[idx, :4] = np.array(\n                            [x[0], y[0], x[-1] + 1, y[-1] + 1],\n                            dtype=np.float32)\n        # if out_file specified, do not show image in window\n        if out_file is not None:\n            show = False\n        # draw bounding boxes\n        img = imshow_det_bboxes(\n            img,\n            bboxes,\n            labels,\n            masks,\n            class_names=self.CLASSES,\n            score_thr=score_thr,\n            bbox_color=bbox_color,\n            text_color=text_color,\n            mask_color=mask_color,\n            thickness=thickness,\n            font_size=font_size,\n            win_name=win_name,\n            show=show,\n            wait_time=wait_time,\n            out_file=out_file)\n\n        if not (show or out_file):\n            return img\n"
  },
  {
    "path": "mmdet/models/detectors/solo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@DETECTORS.register_module()\nclass SOLO(SingleStageInstanceSegmentor):\n    \"\"\"`SOLO: Segmenting Objects by Locations\n    <https://arxiv.org/abs/1912.04488>`_\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 bbox_head=None,\n                 mask_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None,\n                 pretrained=None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            pretrained=pretrained)\n"
  },
  {
    "path": "mmdet/models/detectors/solov2.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage_instance_seg import SingleStageInstanceSegmentor\n\n\n@DETECTORS.register_module()\nclass SOLOv2(SingleStageInstanceSegmentor):\n    \"\"\"`SOLOv2: Dynamic and Fast Instance Segmentation\n    <https://arxiv.org/abs/2003.10152>`_\n\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 bbox_head=None,\n                 mask_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 init_cfg=None,\n                 pretrained=None):\n        super().__init__(\n            backbone=backbone,\n            neck=neck,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            init_cfg=init_cfg,\n            pretrained=pretrained)\n"
  },
  {
    "path": "mmdet/models/detectors/sparse_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .two_stage import TwoStageDetector\n\n\n@DETECTORS.register_module()\nclass SparseRCNN(TwoStageDetector):\n    r\"\"\"Implementation of `Sparse R-CNN: End-to-End Object Detection with\n    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(SparseRCNN, self).__init__(*args, **kwargs)\n        assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \\\n            'do not support external proposals'\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      proposals=None,\n                      **kwargs):\n        \"\"\"Forward function of SparseR-CNN and QueryInst in train stage.\n\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (List[Tensor], optional) : Segmentation masks for\n                each box. This is required to train QueryInst.\n            proposals (List[Tensor], optional): override rpn proposals with\n                custom proposals. Use when `with_rpn` is False.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n\n        assert proposals is None, 'Sparse R-CNN and QueryInst ' \\\n            'do not support external proposals'\n\n        x = self.extract_feat(img)\n        proposal_boxes, proposal_features, imgs_whwh = \\\n            self.rpn_head.forward_train(x, img_metas)\n        roi_losses = self.roi_head.forward_train(\n            x,\n            proposal_boxes,\n            proposal_features,\n            img_metas,\n            gt_bboxes,\n            gt_labels,\n            gt_bboxes_ignore=gt_bboxes_ignore,\n            gt_masks=gt_masks,\n            imgs_whwh=imgs_whwh)\n        return roi_losses\n\n    def simple_test(self, img, img_metas, rescale=False):\n        \"\"\"Test function without test time augmentation.\n\n        Args:\n            imgs (list[torch.Tensor]): List of multiple images\n            img_metas (list[dict]): List of image information.\n            rescale (bool): Whether to rescale the results.\n                Defaults to False.\n\n        Returns:\n            list[list[np.ndarray]]: BBox results of each image and classes.\n                The outer list corresponds to each image. 
The inner list\n                corresponds to each class.\n        \"\"\"\n        x = self.extract_feat(img)\n        proposal_boxes, proposal_features, imgs_whwh = \\\n            self.rpn_head.simple_test_rpn(x, img_metas)\n        results = self.roi_head.simple_test(\n            x,\n            proposal_boxes,\n            proposal_features,\n            img_metas,\n            imgs_whwh=imgs_whwh,\n            rescale=rescale)\n        return results\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        # backbone\n        x = self.extract_feat(img)\n        # rpn\n        num_imgs = len(img)\n        dummy_img_metas = [\n            dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)\n        ]\n        proposal_boxes, proposal_features, imgs_whwh = \\\n            self.rpn_head.simple_test_rpn(x, dummy_img_metas)\n        # roi_head\n        roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,\n                                               proposal_features,\n                                               dummy_img_metas)\n        return roi_outs\n"
  },
  {
    "path": "mmdet/models/detectors/tood.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass TOOD(SingleStageDetector):\n    r\"\"\"Implementation of `TOOD: Task-aligned One-stage Object Detection.\n    <https://arxiv.org/abs/2108.07755>`_.\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                   test_cfg, pretrained, init_cfg)\n\n    def set_epoch(self, epoch):\n        self.bbox_head.epoch = epoch\n"
  },
  {
    "path": "mmdet/models/detectors/trident_faster_rcnn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .faster_rcnn import FasterRCNN\n\n\n@DETECTORS.register_module()\nclass TridentFasterRCNN(FasterRCNN):\n    \"\"\"Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 rpn_head,\n                 roi_head,\n                 train_cfg,\n                 test_cfg,\n                 neck=None,\n                 pretrained=None,\n                 init_cfg=None):\n\n        super(TridentFasterRCNN, self).__init__(\n            backbone=backbone,\n            neck=neck,\n            rpn_head=rpn_head,\n            roi_head=roi_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n        assert self.backbone.num_branch == self.roi_head.num_branch\n        assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx\n        self.num_branch = self.backbone.num_branch\n        self.test_branch_idx = self.backbone.test_branch_idx\n\n    def simple_test(self, img, img_metas, proposals=None, rescale=False):\n        \"\"\"Test without augmentation.\"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        x = self.extract_feat(img)\n        if proposals is None:\n            num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)\n            trident_img_metas = img_metas * num_branch\n            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)\n        else:\n            proposal_list = proposals\n        # TODO： Fix trident_img_metas undefined errors\n        #  when proposals is specified\n        return self.roi_head.simple_test(\n            x, proposal_list, trident_img_metas, rescale=rescale)\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        x = self.extract_feats(imgs)\n        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)\n        trident_img_metas = [img_metas * num_branch for img_metas in img_metas]\n        proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)\n        return self.roi_head.aug_test(\n            x, proposal_list, img_metas, rescale=rescale)\n\n    def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):\n        \"\"\"make copies of img and gts to fit multi-branch.\"\"\"\n        trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)\n        trident_gt_labels = tuple(gt_labels * self.num_branch)\n        trident_img_metas = tuple(img_metas * self.num_branch)\n\n        return super(TridentFasterRCNN,\n                     self).forward_train(img, trident_img_metas,\n                                         trident_gt_bboxes, trident_gt_labels)\n"
  },
  {
    "path": "mmdet/models/detectors/two_stage.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\n\nfrom ..builder import DETECTORS, build_backbone, build_head, build_neck\nfrom .base import BaseDetector\n\n\n@DETECTORS.register_module()\nclass TwoStageDetector(BaseDetector):\n    \"\"\"Base class for two-stage detectors.\n\n    Two-stage detectors typically consisting of a region proposal network and a\n    task-specific regression head.\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck=None,\n                 rpn_head=None,\n                 roi_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(TwoStageDetector, self).__init__(init_cfg)\n        if pretrained:\n            warnings.warn('DeprecationWarning: pretrained is deprecated, '\n                          'please use \"init_cfg\" instead')\n            backbone.pretrained = pretrained\n        self.backbone = build_backbone(backbone)\n\n        if neck is not None:\n            self.neck = build_neck(neck)\n\n        if rpn_head is not None:\n            rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None\n            rpn_head_ = rpn_head.copy()\n            rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)\n            self.rpn_head = build_head(rpn_head_)\n\n        if roi_head is not None:\n            # update train and test cfg here for now\n            # TODO: refactor assigner & sampler\n            rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None\n            roi_head.update(train_cfg=rcnn_train_cfg)\n            roi_head.update(test_cfg=test_cfg.rcnn)\n            roi_head.pretrained = pretrained\n            self.roi_head = build_head(roi_head)\n\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n\n    @property\n    def with_rpn(self):\n        \"\"\"bool: whether the detector has RPN\"\"\"\n        return hasattr(self, 'rpn_head') and self.rpn_head is not None\n\n    @property\n    def with_roi_head(self):\n        \"\"\"bool: whether the detector has a RoI head\"\"\"\n        return hasattr(self, 'roi_head') and self.roi_head is not None\n\n    def extract_feat(self, img):\n        \"\"\"Directly extract features from the backbone+neck.\"\"\"\n        x = self.backbone(img)\n        if self.with_neck:\n            x = self.neck(x)\n        return x\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        outs = ()\n        # backbone\n        x = self.extract_feat(img)\n        # rpn\n        if self.with_rpn:\n            rpn_outs = self.rpn_head(x)\n            outs = outs + (rpn_outs, )\n        proposals = torch.randn(1000, 4).to(img.device)\n        # roi_head\n        roi_outs = self.roi_head.forward_dummy(x, proposals)\n        outs = outs + (roi_outs, )\n        return outs\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      proposals=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n\n            img_metas 
(list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n            gt_labels (list[Tensor]): class indices corresponding to each box\n\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n            proposals : override rpn proposals with custom proposals. Use when\n                `with_rpn` is False.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        x = self.extract_feat(img)\n\n        losses = dict()\n\n        # RPN forward and loss\n        if self.with_rpn:\n            proposal_cfg = self.train_cfg.get('rpn_proposal',\n                                              self.test_cfg.rpn)\n            rpn_losses, proposal_list = self.rpn_head.forward_train(\n                x,\n                img_metas,\n                gt_bboxes,\n                gt_labels=None,\n                gt_bboxes_ignore=gt_bboxes_ignore,\n                proposal_cfg=proposal_cfg,\n                **kwargs)\n            losses.update(rpn_losses)\n        else:\n            proposal_list = proposals\n\n        roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,\n                                                 gt_bboxes, gt_labels,\n                                                 gt_bboxes_ignore, gt_masks,\n                                                 **kwargs)\n        losses.update(roi_losses)\n\n        return losses\n\n    async def async_simple_test(self,\n                                img,\n                                img_meta,\n                                proposals=None,\n                                rescale=False):\n        \"\"\"Async test without augmentation.\"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        x = self.extract_feat(img)\n\n        if proposals is None:\n            proposal_list = await self.rpn_head.async_simple_test_rpn(\n                x, img_meta)\n        else:\n            proposal_list = proposals\n\n        return await self.roi_head.async_simple_test(\n            x, proposal_list, img_meta, rescale=rescale)\n\n    def simple_test(self, img, img_metas, proposals=None, rescale=False):\n        \"\"\"Test without augmentation.\"\"\"\n\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        x = self.extract_feat(img)\n        if proposals is None:\n            proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)\n        else:\n            proposal_list = proposals\n\n        return self.roi_head.simple_test(\n            x, proposal_list, img_metas, rescale=rescale)\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        x = self.extract_feats(imgs)\n        proposal_list = 
self.rpn_head.aug_test_rpn(x, img_metas)\n        return self.roi_head.aug_test(\n            x, proposal_list, img_metas, rescale=rescale)\n\n    def onnx_export(self, img, img_metas):\n\n        img_shape = torch._shape_as_tensor(img)[2:]\n        img_metas[0]['img_shape_for_onnx'] = img_shape\n        x = self.extract_feat(img)\n        proposals = self.rpn_head.onnx_export(x, img_metas)\n        if hasattr(self.roi_head, 'onnx_export'):\n            return self.roi_head.onnx_export(x, proposals, img_metas)\n        else:\n            raise NotImplementedError(\n                f'{self.__class__.__name__} can not '\n                f'be exported to ONNX. Please refer to the '\n                f'list of supported models,'\n                f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx'  # noqa E501\n            )\n"
  },
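  {
    "path": "examples/two_stage_loss_dict_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a minimal sketch of\n# how the dict of loss components returned by TwoStageDetector.forward_train\n# above (RPN losses plus RoI head losses) can be reduced to a single scalar\n# for backpropagation. The keys and values of the fake dict below are made up;\n# real entries may be Tensors or lists of per-level Tensors.\nimport torch\n\n\ndef total_loss(losses):\n    # sum plain Tensors directly and per-level lists element by element\n    total = 0\n    for value in losses.values():\n        if isinstance(value, torch.Tensor):\n            total = total + value.mean()\n        elif isinstance(value, (list, tuple)):\n            total = total + sum(v.mean() for v in value)\n    return total\n\n\nif __name__ == '__main__':\n    fake_losses = {\n        'loss_rpn_cls': [torch.tensor(0.2), torch.tensor(0.1)],\n        'loss_cls': torch.tensor(0.7),\n        'loss_bbox': torch.tensor(0.4),\n    }\n    print(total_loss(fake_losses))  # tensor(1.4000)\n"
  },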
  {
    "path": "mmdet/models/detectors/vfnet.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass VFNet(SingleStageDetector):\n    \"\"\"Implementation of `VarifocalNet\n    (VFNet).<https://arxiv.org/abs/2008.13367>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                    test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolact.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import bbox2result\nfrom ..builder import DETECTORS, build_head\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass YOLACT(SingleStageDetector):\n    \"\"\"Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 segm_head,\n                 mask_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                     test_cfg, pretrained, init_cfg)\n        self.segm_head = build_head(segm_head)\n        self.mask_head = build_head(mask_head)\n\n    def forward_dummy(self, img):\n        \"\"\"Used for computing network flops.\n\n        See `mmdetection/tools/analysis_tools/get_flops.py`\n        \"\"\"\n        feat = self.extract_feat(img)\n        bbox_outs = self.bbox_head(feat)\n        prototypes = self.mask_head.forward_dummy(feat[0])\n        return (bbox_outs, prototypes)\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None):\n        \"\"\"\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        # convert Bitmap mask or Polygon Mask to Tensor here\n        gt_masks = [\n            gt_mask.to_tensor(dtype=torch.uint8, device=img.device)\n            for gt_mask in gt_masks\n        ]\n\n        x = self.extract_feat(img)\n\n        cls_score, bbox_pred, coeff_pred = self.bbox_head(x)\n        bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,\n                                                          img_metas)\n        losses, sampling_results = self.bbox_head.loss(\n            *bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)\n\n        segm_head_outs = self.segm_head(x[0])\n        loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)\n        losses.update(loss_segm)\n\n        mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,\n                                   sampling_results)\n        loss_mask = 
self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,\n                                        img_metas, sampling_results)\n        losses.update(loss_mask)\n\n        # check NaN and Inf\n        for loss_name in losses.keys():\n            assert torch.isfinite(torch.stack(losses[loss_name]))\\\n                .all().item(), '{} becomes infinite or NaN!'\\\n                .format(loss_name)\n\n        return losses\n\n    def simple_test(self, img, img_metas, rescale=False):\n        \"\"\"Test function without test-time augmentation.\"\"\"\n        feat = self.extract_feat(img)\n        det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(\n            feat, img_metas, rescale=rescale)\n        bbox_results = [\n            bbox2result(det_bbox, det_label, self.bbox_head.num_classes)\n            for det_bbox, det_label in zip(det_bboxes, det_labels)\n        ]\n\n        segm_results = self.mask_head.simple_test(\n            feat,\n            det_bboxes,\n            det_labels,\n            det_coeffs,\n            img_metas,\n            rescale=rescale)\n\n        return list(zip(bbox_results, segm_results))\n\n    def aug_test(self, imgs, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\"\"\"\n        raise NotImplementedError(\n            'YOLACT does not support test-time augmentation')\n"
  },
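  {
    "path": "examples/yolact_gt_mask_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: YOLACT.forward_train\n# above converts ground-truth masks to uint8 tensors with\n# gt_mask.to_tensor(dtype=torch.uint8, device=img.device). This is a minimal\n# sketch of that conversion using BitmapMasks from mmdet.core; the toy 4x4\n# mask is made up.\nimport numpy as np\nimport torch\n\nfrom mmdet.core import BitmapMasks\n\nmasks = np.zeros((1, 4, 4), dtype=np.uint8)\nmasks[0, 1:3, 1:3] = 1  # a single 2x2 square object\ngt_masks = BitmapMasks(masks, height=4, width=4)\ntensor_masks = gt_masks.to_tensor(dtype=torch.uint8, device='cpu')\nprint(tensor_masks.shape)  # expected: torch.Size([1, 4, 4])\n"
  },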
  {
    "path": "mmdet/models/detectors/yolo.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\nimport torch\n\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass YOLOV3(SingleStageDetector):\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                     test_cfg, pretrained, init_cfg)\n\n    def onnx_export(self, img, img_metas):\n        \"\"\"Test function for exporting to ONNX, without test time augmentation.\n\n        Args:\n            img (torch.Tensor): input images.\n            img_metas (list[dict]): List of image information.\n\n        Returns:\n            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n                and class labels of shape [N, num_det].\n        \"\"\"\n        x = self.extract_feat(img)\n        outs = self.bbox_head.forward(x)\n        # get shape as tensor\n        img_shape = torch._shape_as_tensor(img)[2:]\n        img_metas[0]['img_shape_for_onnx'] = img_shape\n\n        det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)\n\n        return det_bboxes, det_labels\n"
  },
  {
    "path": "mmdet/models/detectors/yolof.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass YOLOF(SingleStageDetector):\n    r\"\"\"Implementation of `You Only Look One-level Feature\n    <https://arxiv.org/abs/2103.09460>`_\"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                    test_cfg, pretrained, init_cfg)\n"
  },
  {
    "path": "mmdet/models/detectors/yolox.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport random\n\nimport torch\nimport torch.distributed as dist\nimport torch.nn.functional as F\nfrom mmcv.runner import get_dist_info\n\nfrom ...utils import log_img_scale\nfrom ..builder import DETECTORS\nfrom .single_stage import SingleStageDetector\n\n\n@DETECTORS.register_module()\nclass YOLOX(SingleStageDetector):\n    r\"\"\"Implementation of `YOLOX: Exceeding YOLO Series in 2021\n    <https://arxiv.org/abs/2107.08430>`_\n\n    Note: Considering the trade-off between training speed and accuracy,\n    multi-scale training is temporarily kept. More elegant implementation\n    will be adopted in the future.\n\n    Args:\n        backbone (nn.Module): The backbone module.\n        neck (nn.Module): The neck module.\n        bbox_head (nn.Module): The bbox head module.\n        train_cfg (obj:`ConfigDict`, optional): The training config\n            of YOLOX. Default: None.\n        test_cfg (obj:`ConfigDict`, optional): The testing config\n            of YOLOX. Default: None.\n        pretrained (str, optional): model pretrained path.\n            Default: None.\n        input_size (tuple): The model default input image size. The shape\n            order should be (height, width). Default: (640, 640).\n        size_multiplier (int): Image size multiplication factor.\n            Default: 32.\n        random_size_range (tuple): The multi-scale random range during\n            multi-scale training. The real training image size will\n            be multiplied by size_multiplier. Default: (15, 25).\n        random_size_interval (int): The iter interval of change\n            image size. Default: 10.\n        init_cfg (dict, optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 backbone,\n                 neck,\n                 bbox_head,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 input_size=(640, 640),\n                 size_multiplier=32,\n                 random_size_range=(15, 25),\n                 random_size_interval=10,\n                 init_cfg=None):\n        super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,\n                                    test_cfg, pretrained, init_cfg)\n        log_img_scale(input_size, skip_square=True)\n        self.rank, self.world_size = get_dist_info()\n        self._default_input_size = input_size\n        self._input_size = input_size\n        self._random_size_range = random_size_range\n        self._random_size_interval = random_size_interval\n        self._size_multiplier = size_multiplier\n        self._progress_in_iter = 0\n\n    def forward_train(self,\n                      img,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None):\n        \"\"\"\n        Args:\n            img (Tensor): Input images of shape (N, C, H, W).\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): A List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                :class:`mmdet.datasets.pipelines.Collect`.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n     
           image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): Specify which bounding\n                boxes can be ignored when computing the loss.\n        Returns:\n            dict[str, Tensor]: A dictionary of loss components.\n        \"\"\"\n        # Multi-scale training\n        img, gt_bboxes = self._preprocess(img, gt_bboxes)\n\n        losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes,\n                                                  gt_labels, gt_bboxes_ignore)\n\n        # random resizing\n        if (self._progress_in_iter + 1) % self._random_size_interval == 0:\n            self._input_size = self._random_resize(device=img.device)\n        self._progress_in_iter += 1\n\n        return losses\n\n    def _preprocess(self, img, gt_bboxes):\n        scale_y = self._input_size[0] / self._default_input_size[0]\n        scale_x = self._input_size[1] / self._default_input_size[1]\n        if scale_x != 1 or scale_y != 1:\n            img = F.interpolate(\n                img,\n                size=self._input_size,\n                mode='bilinear',\n                align_corners=False)\n            for gt_bbox in gt_bboxes:\n                gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x\n                gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y\n        return img, gt_bboxes\n\n    def _random_resize(self, device):\n        tensor = torch.LongTensor(2).to(device)\n\n        if self.rank == 0:\n            size = random.randint(*self._random_size_range)\n            aspect_ratio = float(\n                self._default_input_size[1]) / self._default_input_size[0]\n            size = (self._size_multiplier * size,\n                    self._size_multiplier * int(aspect_ratio * size))\n            tensor[0] = size[0]\n            tensor[1] = size[1]\n\n        if self.world_size > 1:\n            dist.barrier()\n            dist.broadcast(tensor, 0)\n\n        input_size = (tensor[0].item(), tensor[1].item())\n        return input_size\n"
  },
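  {
    "path": "examples/yolox_multiscale_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a standalone sketch\n# of the multi-scale step documented in YOLOX._preprocess above. The image is\n# resized with bilinear interpolation and the ground-truth boxes are scaled by\n# the same (height, width) factors. The sizes and the toy box are made up.\nimport torch\nimport torch.nn.functional as F\n\n\ndef rescale_batch(img, gt_bboxes, default_size=(640, 640), new_size=(480, 480)):\n    # scale factors follow the (height, width) ordering used by YOLOX\n    scale_y = new_size[0] / default_size[0]\n    scale_x = new_size[1] / default_size[1]\n    img = F.interpolate(\n        img, size=new_size, mode='bilinear', align_corners=False)\n    for boxes in gt_bboxes:\n        boxes[..., 0::2] *= scale_x  # tl_x, br_x\n        boxes[..., 1::2] *= scale_y  # tl_y, br_y\n    return img, gt_bboxes\n\n\nif __name__ == '__main__':\n    img = torch.zeros(1, 3, 640, 640)\n    gt = [torch.tensor([[10., 20., 110., 220.]])]\n    img, gt = rescale_batch(img, gt)\n    print(img.shape)  # torch.Size([1, 3, 480, 480])\n    print(gt[0])  # tensor([[ 7.5000, 15.0000, 82.5000, 165.0000]])\n"
  },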
  {
    "path": "mmdet/models/losses/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .accuracy import Accuracy, accuracy\nfrom .ae_loss import AssociativeEmbeddingLoss\nfrom .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss\nfrom .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,\n                                 cross_entropy, mask_cross_entropy)\nfrom .dice_loss import DiceLoss\nfrom .focal_loss import FocalLoss, sigmoid_focal_loss\nfrom .gaussian_focal_loss import GaussianFocalLoss\nfrom .gfocal_loss import DistributionFocalLoss, QualityFocalLoss\nfrom .ghm_loss import GHMC, GHMR\nfrom .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,\n                       bounded_iou_loss, iou_loss)\nfrom .kd_loss import KnowledgeDistillationKLDivLoss\nfrom .mse_loss import MSELoss, mse_loss\nfrom .pisa_loss import carl_loss, isr_p\nfrom .seesaw_loss import SeesawLoss\nfrom .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss\nfrom .utils import reduce_loss, weight_reduce_loss, weighted_loss\nfrom .varifocal_loss import VarifocalLoss\n\n__all__ = [\n    'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',\n    'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',\n    'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',\n    'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',\n    'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',\n    'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',\n    'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',\n    'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',\n    'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss'\n]\n"
  },
  {
    "path": "mmdet/models/losses/accuracy.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch.nn as nn\n\n\n@mmcv.jit(coderize=True)\ndef accuracy(pred, target, topk=1, thresh=None):\n    \"\"\"Calculate accuracy according to the prediction and target.\n\n    Args:\n        pred (torch.Tensor): The model prediction, shape (N, num_class)\n        target (torch.Tensor): The target of each prediction, shape (N, )\n        topk (int | tuple[int], optional): If the predictions in ``topk``\n            matches the target, the predictions will be regarded as\n            correct ones. Defaults to 1.\n        thresh (float, optional): If not None, predictions with scores under\n            this threshold are considered incorrect. Default to None.\n\n    Returns:\n        float | tuple[float]: If the input ``topk`` is a single integer,\n            the function will return a single float as accuracy. If\n            ``topk`` is a tuple containing multiple integers, the\n            function will return a tuple containing accuracies of\n            each ``topk`` number.\n    \"\"\"\n    assert isinstance(topk, (int, tuple))\n    if isinstance(topk, int):\n        topk = (topk, )\n        return_single = True\n    else:\n        return_single = False\n\n    maxk = max(topk)\n    if pred.size(0) == 0:\n        accu = [pred.new_tensor(0.) for i in range(len(topk))]\n        return accu[0] if return_single else accu\n    assert pred.ndim == 2 and target.ndim == 1\n    assert pred.size(0) == target.size(0)\n    assert maxk <= pred.size(1), \\\n        f'maxk {maxk} exceeds pred dimension {pred.size(1)}'\n    pred_value, pred_label = pred.topk(maxk, dim=1)\n    pred_label = pred_label.t()  # transpose to shape (maxk, N)\n    correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))\n    if thresh is not None:\n        # Only prediction values larger than thresh are counted as correct\n        correct = correct & (pred_value > thresh).t()\n    res = []\n    for k in topk:\n        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\n        res.append(correct_k.mul_(100.0 / pred.size(0)))\n    return res[0] if return_single else res\n\n\nclass Accuracy(nn.Module):\n\n    def __init__(self, topk=(1, ), thresh=None):\n        \"\"\"Module to calculate the accuracy.\n\n        Args:\n            topk (tuple, optional): The criterion used to calculate the\n                accuracy. Defaults to (1,).\n            thresh (float, optional): If not None, predictions with scores\n                under this threshold are considered incorrect. Default to None.\n        \"\"\"\n        super().__init__()\n        self.topk = topk\n        self.thresh = thresh\n\n    def forward(self, pred, target):\n        \"\"\"Forward function to calculate accuracy.\n\n        Args:\n            pred (torch.Tensor): Prediction of models.\n            target (torch.Tensor): Target for each prediction.\n\n        Returns:\n            tuple[float]: The accuracies under different topk criterions.\n        \"\"\"\n        return accuracy(pred, target, self.topk, self.thresh)\n"
  },
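  {
    "path": "examples/accuracy_usage_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a small usage sketch\n# for the accuracy() helper defined in mmdet/models/losses/accuracy.py above.\n# The prediction scores and targets are made up; accuracy() returns\n# percentages, one value per requested k.\nimport torch\n\nfrom mmdet.models.losses import accuracy\n\npred = torch.tensor([[0.10, 0.70, 0.20],\n                     [0.80, 0.10, 0.10],\n                     [0.30, 0.35, 0.45]])\ntarget = torch.tensor([1, 0, 1])\ntop1, top2 = accuracy(pred, target, topk=(1, 2))\n# samples 0 and 1 are correct at top-1; sample 2 only enters at top-2\nprint(top1.item(), top2.item())  # roughly 66.67 and 100.0\n"
  },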
  {
    "path": "mmdet/models/losses/ae_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\n\n\n@mmcv.jit(derivate=True, coderize=True)\ndef ae_loss_per_image(tl_preds, br_preds, match):\n    \"\"\"Associative Embedding Loss in one image.\n\n    Associative Embedding Loss including two parts: pull loss and push loss.\n    Pull loss makes embedding vectors from same object closer to each other.\n    Push loss distinguish embedding vector from different objects, and makes\n        the gap between them is large enough.\n\n    During computing, usually there are 3 cases:\n        - no object in image: both pull loss and push loss will be 0.\n        - one object in image: push loss will be 0 and pull loss is computed\n            by the two corner of the only object.\n        - more than one objects in image: pull loss is computed by corner pairs\n            from each object, push loss is computed by each object with all\n            other objects. We use confusion matrix with 0 in diagonal to\n            compute the push loss.\n\n    Args:\n        tl_preds (tensor): Embedding feature map of left-top corner.\n        br_preds (tensor): Embedding feature map of bottim-right corner.\n        match (list): Downsampled coordinates pair of each ground truth box.\n    \"\"\"\n\n    tl_list, br_list, me_list = [], [], []\n    if len(match) == 0:  # no object in image\n        pull_loss = tl_preds.sum() * 0.\n        push_loss = tl_preds.sum() * 0.\n    else:\n        for m in match:\n            [tl_y, tl_x], [br_y, br_x] = m\n            tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)\n            br_e = br_preds[:, br_y, br_x].view(-1, 1)\n            tl_list.append(tl_e)\n            br_list.append(br_e)\n            me_list.append((tl_e + br_e) / 2.0)\n\n        tl_list = torch.cat(tl_list)\n        br_list = torch.cat(br_list)\n        me_list = torch.cat(me_list)\n\n        assert tl_list.size() == br_list.size()\n\n        # N is object number in image, M is dimension of embedding vector\n        N, M = tl_list.size()\n\n        pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)\n        pull_loss = pull_loss.sum() / N\n\n        margin = 1  # exp setting of CornerNet, details in section 3.3 of paper\n\n        # confusion matrix of push loss\n        conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list\n        conf_weight = 1 - torch.eye(N).type_as(me_list)\n        conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())\n\n        if N > 1:  # more than one object in current image\n            push_loss = F.relu(conf_mat).sum() / (N * (N - 1))\n        else:\n            push_loss = tl_preds.sum() * 0.\n\n    return pull_loss, push_loss\n\n\n@LOSSES.register_module()\nclass AssociativeEmbeddingLoss(nn.Module):\n    \"\"\"Associative Embedding Loss.\n\n    More details can be found in\n    `Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and\n    `CornerNet <https://arxiv.org/abs/1808.01244>`_ .\n    Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_  # noqa: E501\n\n    Args:\n        pull_weight (float): Loss weight for corners from same object.\n        push_weight (float): Loss weight for corners from different object.\n    \"\"\"\n\n    def __init__(self, pull_weight=0.25, push_weight=0.25):\n        super(AssociativeEmbeddingLoss, self).__init__()\n        self.pull_weight = 
pull_weight\n        self.push_weight = push_weight\n\n    def forward(self, pred, target, match):\n        \"\"\"Forward function.\"\"\"\n        batch = pred.size(0)\n        pull_all, push_all = 0.0, 0.0\n        for i in range(batch):\n            pull, push = ae_loss_per_image(pred[i], target[i], match[i])\n\n            pull_all += self.pull_weight * pull\n            push_all += self.push_weight * push\n\n        return pull_all, push_all\n"
  },
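  {
    "path": "examples/ae_loss_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a hand-checkable\n# example for ae_loss_per_image defined in mmdet/models/losses/ae_loss.py\n# above. Two objects with 1-D embeddings: object A has corner embeddings\n# 1.0/3.0 (mean 2.0) and object B has 5.0/5.0 (mean 5.0), so the pull loss is\n# ((1-2)^2 + (3-2)^2 + 0 + 0) / 2 = 1.0 and, since |2 - 5| exceeds the margin\n# of 1, the push loss is 0. All values are made up.\nimport torch\n\nfrom mmdet.models.losses.ae_loss import ae_loss_per_image\n\n# embedding maps of shape (M=1, H=2, W=2)\ntl_preds = torch.tensor([[[1.0, 5.0], [0.0, 0.0]]])\nbr_preds = torch.tensor([[[3.0, 5.0], [0.0, 0.0]]])\n# per-object [[tl_y, tl_x], [br_y, br_x]] coordinates in the downsampled map\nmatch = [[[0, 0], [0, 0]], [[0, 1], [0, 1]]]\n\npull, push = ae_loss_per_image(tl_preds, br_preds, match)\nprint(pull.item(), push.item())  # expected: 1.0 0.0\n"
  },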
  {
    "path": "mmdet/models/losses/balanced_l1_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef balanced_l1_loss(pred,\n                     target,\n                     beta=1.0,\n                     alpha=0.5,\n                     gamma=1.5,\n                     reduction='mean'):\n    \"\"\"Calculate balanced L1 loss.\n\n    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, 4).\n        target (torch.Tensor): The learning target of the prediction with\n            shape (N, 4).\n        beta (float): The loss is a piecewise function of prediction and target\n            and ``beta`` serves as a threshold for the difference between the\n            prediction and target. Defaults to 1.0.\n        alpha (float): The denominator ``alpha`` in the balanced L1 loss.\n            Defaults to 0.5.\n        gamma (float): The ``gamma`` in the balanced L1 loss.\n            Defaults to 1.5.\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n\n    Returns:\n        torch.Tensor: The calculated loss\n    \"\"\"\n    assert beta > 0\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n\n    diff = torch.abs(pred - target)\n    b = np.e**(gamma / alpha) - 1\n    loss = torch.where(\n        diff < beta, alpha / b *\n        (b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,\n        gamma * diff + gamma / b - alpha * beta)\n\n    return loss\n\n\n@LOSSES.register_module()\nclass BalancedL1Loss(nn.Module):\n    \"\"\"Balanced L1 Loss.\n\n    arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)\n\n    Args:\n        alpha (float): The denominator ``alpha`` in the balanced L1 loss.\n            Defaults to 0.5.\n        gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.\n        beta (float, optional): The loss is a piecewise function of prediction\n            and target. ``beta`` serves as a threshold for the difference\n            between the prediction and target. Defaults to 1.0.\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. 
Defaults to 1.0\n    \"\"\"\n\n    def __init__(self,\n                 alpha=0.5,\n                 gamma=1.5,\n                 beta=1.0,\n                 reduction='mean',\n                 loss_weight=1.0):\n        super(BalancedL1Loss, self).__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Forward function of loss.\n\n        Args:\n            pred (torch.Tensor): The prediction with shape (N, 4).\n            target (torch.Tensor): The learning target of the prediction with\n                shape (N, 4).\n            weight (torch.Tensor, optional): Sample-wise loss weight with\n                shape (N, ).\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * balanced_l1_loss(\n            pred,\n            target,\n            weight,\n            alpha=self.alpha,\n            gamma=self.gamma,\n            beta=self.beta,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss_bbox\n"
  },
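  {
    "path": "examples/balanced_l1_loss_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a numeric check of\n# the piecewise balanced L1 formula implemented above. With the defaults\n# alpha=0.5, gamma=1.5, beta=1.0 and b = e**(gamma / alpha) - 1, the inner and\n# outer branches agree at diff == beta, so the loss is continuous there. The\n# sample prediction/target pair is made up.\nimport math\n\nimport torch\n\nfrom mmdet.models.losses import balanced_l1_loss\n\nalpha, gamma, beta = 0.5, 1.5, 1.0\nb = math.e**(gamma / alpha) - 1\ndiff = beta\ninner = alpha / b * (b * diff + 1) * math.log(b * diff / beta + 1) - alpha * diff\nouter = gamma * diff + gamma / b - alpha * beta\nprint(abs(inner - outer) < 1e-6)  # True\n\n# the functional form reduces to the mean loss over all box coordinates\npred = torch.tensor([[0.0, 0.0, 2.0, 2.0]])\ntarget = torch.tensor([[0.5, 0.5, 2.0, 2.0]])\nprint(balanced_l1_loss(pred, target, alpha=alpha, gamma=gamma, beta=beta))\n"
  },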
  {
    "path": "mmdet/models/losses/cross_entropy_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\ndef cross_entropy(pred,\n                  label,\n                  weight=None,\n                  reduction='mean',\n                  avg_factor=None,\n                  class_weight=None,\n                  ignore_index=-100,\n                  avg_non_ignore=False):\n    \"\"\"Calculate the CrossEntropy loss.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the number\n            of classes.\n        label (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        reduction (str, optional): The method used to reduce the loss.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (int | None): The label index to be ignored.\n            If None, it will be set to default value. Default: -100.\n        avg_non_ignore (bool): The flag decides to whether the loss is\n            only averaged over non-ignored targets. Default: False.\n\n    Returns:\n        torch.Tensor: The calculated loss\n    \"\"\"\n    # The default value of ignore_index is the same as F.cross_entropy\n    ignore_index = -100 if ignore_index is None else ignore_index\n    # element-wise losses\n    loss = F.cross_entropy(\n        pred,\n        label,\n        weight=class_weight,\n        reduction='none',\n        ignore_index=ignore_index)\n\n    # average loss over non-ignored elements\n    # pytorch's official cross_entropy average loss over non-ignored elements\n    # refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660  # noqa\n    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':\n        avg_factor = label.numel() - (label == ignore_index).sum().item()\n\n    # apply weights and do the reduction\n    if weight is not None:\n        weight = weight.float()\n    loss = weight_reduce_loss(\n        loss, weight=weight, reduction=reduction, avg_factor=avg_factor)\n\n    return loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):\n    \"\"\"Expand onehot labels to match the size of prediction.\"\"\"\n    bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n    valid_mask = (labels >= 0) & (labels != ignore_index)\n    inds = torch.nonzero(\n        valid_mask & (labels < label_channels), as_tuple=False)\n\n    if inds.numel() > 0:\n        bin_labels[inds, labels[inds]] = 1\n\n    valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),\n                                               label_channels).float()\n    if label_weights is None:\n        bin_label_weights = valid_mask\n    else:\n        bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)\n        bin_label_weights *= valid_mask\n\n    return bin_labels, bin_label_weights, valid_mask\n\n\ndef binary_cross_entropy(pred,\n                         label,\n                         weight=None,\n                         reduction='mean',\n                         avg_factor=None,\n                         class_weight=None,\n                         ignore_index=-100,\n                         avg_non_ignore=False):\n  
  \"\"\"Calculate the binary CrossEntropy loss.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, 1) or (N, ).\n            When the shape of pred is (N, 1), label will be expanded to\n            one-hot format, and when the shape of pred is (N, ), label\n            will not be expanded to one-hot format.\n        label (torch.Tensor): The learning label of the prediction,\n            with shape (N, ).\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        reduction (str, optional): The method used to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (int | None): The label index to be ignored.\n            If None, it will be set to default value. Default: -100.\n        avg_non_ignore (bool): The flag decides to whether the loss is\n            only averaged over non-ignored targets. Default: False.\n\n    Returns:\n        torch.Tensor: The calculated loss.\n    \"\"\"\n    # The default value of ignore_index is the same as F.cross_entropy\n    ignore_index = -100 if ignore_index is None else ignore_index\n\n    if pred.dim() != label.dim():\n        label, weight, valid_mask = _expand_onehot_labels(\n            label, weight, pred.size(-1), ignore_index)\n    else:\n        # should mask out the ignored elements\n        valid_mask = ((label >= 0) & (label != ignore_index)).float()\n        if weight is not None:\n            # The inplace writing method will have a mismatched broadcast\n            # shape error if the weight and valid_mask dimensions\n            # are inconsistent such as (B,N,1) and (B,N,C).\n            weight = weight * valid_mask\n        else:\n            weight = valid_mask\n\n    # average loss over non-ignored elements\n    if (avg_factor is None) and avg_non_ignore and reduction == 'mean':\n        avg_factor = valid_mask.sum().item()\n\n    # weighted element-wise losses\n    weight = weight.float()\n    loss = F.binary_cross_entropy_with_logits(\n        pred, label.float(), pos_weight=class_weight, reduction='none')\n    # do the reduction for the weighted loss\n    loss = weight_reduce_loss(\n        loss, weight, reduction=reduction, avg_factor=avg_factor)\n\n    return loss\n\n\ndef mask_cross_entropy(pred,\n                       target,\n                       label,\n                       reduction='mean',\n                       avg_factor=None,\n                       class_weight=None,\n                       ignore_index=None,\n                       **kwargs):\n    \"\"\"Calculate the CrossEntropy loss for masks.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C, *), C is the\n            number of classes. The trailing * indicates arbitrary shape.\n        target (torch.Tensor): The learning label of the prediction.\n        label (torch.Tensor): ``label`` indicates the class label of the mask\n            corresponding object. This will be used to select the mask in the\n            of the class which the object belongs to when the mask prediction\n            if not class-agnostic.\n        reduction (str, optional): The method used to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. 
Defaults to None.\n        class_weight (list[float], optional): The weight for each class.\n        ignore_index (None): Placeholder, to be consistent with other loss.\n            Default: None.\n\n    Returns:\n        torch.Tensor: The calculated loss\n\n    Example:\n        >>> N, C = 3, 11\n        >>> H, W = 2, 2\n        >>> pred = torch.randn(N, C, H, W) * 1000\n        >>> target = torch.rand(N, H, W)\n        >>> label = torch.randint(0, C, size=(N,))\n        >>> reduction = 'mean'\n        >>> avg_factor = None\n        >>> class_weights = None\n        >>> loss = mask_cross_entropy(pred, target, label, reduction,\n        >>>                           avg_factor, class_weights)\n        >>> assert loss.shape == (1,)\n    \"\"\"\n    assert ignore_index is None, 'BCE loss does not support ignore_index'\n    # TODO: handle these two reserved arguments\n    assert reduction == 'mean' and avg_factor is None\n    num_rois = pred.size()[0]\n    inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)\n    pred_slice = pred[inds, label].squeeze(1)\n    return F.binary_cross_entropy_with_logits(\n        pred_slice, target, weight=class_weight, reduction='mean')[None]\n\n\n@LOSSES.register_module()\nclass CrossEntropyLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=False,\n                 use_mask=False,\n                 reduction='mean',\n                 class_weight=None,\n                 ignore_index=None,\n                 loss_weight=1.0,\n                 avg_non_ignore=False):\n        \"\"\"CrossEntropyLoss.\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n                instead of softmax. Defaults to False.\n            use_mask (bool, optional): Whether to use mask cross entropy loss.\n                Defaults to False.\n            reduction (str, optional): The method used to reduce the loss.\n                Defaults to 'mean'. Options are \"none\", \"mean\" and \"sum\".\n            class_weight (list[float], optional): Weight of each class.\n                Defaults to None.\n            ignore_index (int | None): The label index to be ignored.\n                Defaults to None.\n            loss_weight (float, optional): Weight of the loss. Defaults to 1.0.\n            avg_non_ignore (bool): If True, the loss is only averaged over\n                non-ignored targets. 
Default: False.\n        \"\"\"\n        super(CrossEntropyLoss, self).__init__()\n        assert (use_sigmoid is False) or (use_mask is False)\n        self.use_sigmoid = use_sigmoid\n        self.use_mask = use_mask\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.class_weight = class_weight\n        self.ignore_index = ignore_index\n        self.avg_non_ignore = avg_non_ignore\n        if ((ignore_index is not None) and not self.avg_non_ignore\n                and self.reduction == 'mean'):\n            warnings.warn(\n                'Default ``avg_non_ignore`` is False, if you would like to '\n                'ignore the certain label and average loss over non-ignore '\n                'labels, which is the same with PyTorch official '\n                'cross_entropy, set ``avg_non_ignore=True``.')\n\n        if self.use_sigmoid:\n            self.cls_criterion = binary_cross_entropy\n        elif self.use_mask:\n            self.cls_criterion = mask_cross_entropy\n        else:\n            self.cls_criterion = cross_entropy\n\n    def extra_repr(self):\n        \"\"\"Extra repr.\"\"\"\n        s = f'avg_non_ignore={self.avg_non_ignore}'\n        return s\n\n    def forward(self,\n                cls_score,\n                label,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                ignore_index=None,\n                **kwargs):\n        \"\"\"Forward function.\n\n        Args:\n            cls_score (torch.Tensor): The prediction.\n            label (torch.Tensor): The learning label of the prediction.\n            weight (torch.Tensor, optional): Sample-wise loss weight.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The method used to reduce the\n                loss. Options are \"none\", \"mean\" and \"sum\".\n            ignore_index (int | None): The label index to be ignored.\n                If not None, it will override the default value. Default: None.\n        Returns:\n            torch.Tensor: The calculated loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if ignore_index is None:\n            ignore_index = self.ignore_index\n\n        if self.class_weight is not None:\n            class_weight = cls_score.new_tensor(\n                self.class_weight, device=cls_score.device)\n        else:\n            class_weight = None\n        loss_cls = self.loss_weight * self.cls_criterion(\n            cls_score,\n            label,\n            weight,\n            class_weight=class_weight,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            ignore_index=ignore_index,\n            avg_non_ignore=self.avg_non_ignore,\n            **kwargs)\n        return loss_cls\n"
  },
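  {
    "path": "examples/cross_entropy_avg_non_ignore_sketch.py",
    "content": "# Hypothetical illustration file, not part of mmdetection: a usage sketch for\n# CrossEntropyLoss above showing the effect of avg_non_ignore. The logits and\n# labels are made up; the last sample carries the ignore index, so it\n# contributes zero loss either way, but the divisor of the mean changes from\n# the full batch size (3) to the number of non-ignored samples (2).\nimport torch\n\nfrom mmdet.models.losses import CrossEntropyLoss\n\nlogits = torch.tensor([[2.0, 0.0], [0.0, 2.0], [2.0, 0.0]])\nlabels = torch.tensor([0, 1, -100])\n\nloss_over_all = CrossEntropyLoss(ignore_index=-100)(logits, labels)\nloss_over_valid = CrossEntropyLoss(\n    ignore_index=-100, avg_non_ignore=True)(logits, labels)\nprint(loss_over_all.item(), loss_over_valid.item())\n"
  },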
  {
    "path": "mmdet/models/losses/dice_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\ndef dice_loss(pred,\n              target,\n              weight=None,\n              eps=1e-3,\n              reduction='mean',\n              naive_dice=False,\n              avg_factor=None):\n    \"\"\"Calculate dice loss, there are two forms of dice loss is supported:\n\n        - the one proposed in `V-Net: Fully Convolutional Neural\n            Networks for Volumetric Medical Image Segmentation\n            <https://arxiv.org/abs/1606.04797>`_.\n        - the dice loss in which the power of the number in the\n            denominator is the first power instead of the second\n            power.\n\n    Args:\n        pred (torch.Tensor): The prediction, has a shape (n, *)\n        target (torch.Tensor): The learning label of the prediction,\n            shape (n, *), same shape of pred.\n        weight (torch.Tensor, optional): The weight of loss for each\n            prediction, has a shape (n,). Defaults to None.\n        eps (float): Avoid dividing by zero. Default: 1e-3.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n            Options are \"none\", \"mean\" and \"sum\".\n        naive_dice (bool, optional): If false, use the dice\n                loss defined in the V-Net paper, otherwise, use the\n                naive dice loss in which the power of the number in the\n                denominator is the first power instead of the second\n                power.Defaults to False.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n\n    input = pred.flatten(1)\n    target = target.flatten(1).float()\n\n    a = torch.sum(input * target, 1)\n    if naive_dice:\n        b = torch.sum(input, 1)\n        c = torch.sum(target, 1)\n        d = (2 * a + eps) / (b + c + eps)\n    else:\n        b = torch.sum(input * input, 1) + eps\n        c = torch.sum(target * target, 1) + eps\n        d = (2 * a) / (b + c)\n\n    loss = 1 - d\n    if weight is not None:\n        assert weight.ndim == loss.ndim\n        assert len(weight) == len(pred)\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@LOSSES.register_module()\nclass DiceLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 activate=True,\n                 reduction='mean',\n                 naive_dice=False,\n                 loss_weight=1.0,\n                 eps=1e-3):\n        \"\"\"Compute dice loss.\n\n        Args:\n            use_sigmoid (bool, optional): Whether to the prediction is\n                used for sigmoid or softmax. Defaults to True.\n            activate (bool): Whether to activate the predictions inside,\n                this will disable the inside sigmoid operation.\n                Defaults to True.\n            reduction (str, optional): The method used\n                to reduce the loss. Options are \"none\",\n                \"mean\" and \"sum\". Defaults to 'mean'.\n            naive_dice (bool, optional): If false, use the dice\n                loss defined in the V-Net paper, otherwise, use the\n                naive dice loss in which the power of the number in the\n                denominator is the first power instead of the second\n                power. 
Defaults to False.\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n            eps (float): Avoid dividing by zero. Defaults to 1e-3.\n        \"\"\"\n\n        super(DiceLoss, self).__init__()\n        self.use_sigmoid = use_sigmoid\n        self.reduction = reduction\n        self.naive_dice = naive_dice\n        self.loss_weight = loss_weight\n        self.eps = eps\n        self.activate = activate\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                reduction_override=None,\n                avg_factor=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction, has a shape (n, *).\n            target (torch.Tensor): The label of the prediction,\n                shape (n, *), same shape of pred.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction, has a shape (n,). Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n\n        if self.activate:\n            if self.use_sigmoid:\n                pred = pred.sigmoid()\n            else:\n                raise NotImplementedError\n\n        loss = self.loss_weight * dice_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            naive_dice=self.naive_dice,\n            avg_factor=avg_factor)\n\n        return loss\n"
  },
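  {
    "path": "examples/losses/dice_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# It assumes torch and mmdet are importable and that `build_loss` from\n# mmdet.models.builder builds losses registered through LOSSES, as the\n# modules in mmdet/models/losses do.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\n# Two flattened binary masks with 8 pixels each; `pred` holds raw logits.\npred = torch.randn(2, 8)\ntarget = torch.randint(0, 2, (2, 8)).float()\n\n# `activate=True` lets the loss apply sigmoid to the logits itself.\ndice = build_loss(\n    dict(type='DiceLoss', use_sigmoid=True, activate=True, naive_dice=False))\nprint(dice(pred, target))  # scalar tensor under the default 'mean' reduction\n"
  },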
  {
    "path": "mmdet/models/losses/focal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\n# This method is only for debugging\ndef py_sigmoid_focal_loss(pred,\n                          target,\n                          weight=None,\n                          gamma=2.0,\n                          alpha=0.25,\n                          reduction='mean',\n                          avg_factor=None):\n    \"\"\"PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the\n            number of classes\n        target (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    pred_sigmoid = pred.sigmoid()\n    target = target.type_as(pred)\n    pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)\n    focal_weight = (alpha * target + (1 - alpha) *\n                    (1 - target)) * pt.pow(gamma)\n    loss = F.binary_cross_entropy_with_logits(\n        pred, target, reduction='none') * focal_weight\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\ndef py_focal_loss_with_prob(pred,\n                            target,\n                            weight=None,\n                            gamma=2.0,\n                            alpha=0.25,\n                            reduction='mean',\n                            avg_factor=None):\n    \"\"\"PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.\n    Different from `py_sigmoid_focal_loss`, this function accepts probability\n    as input.\n\n    Args:\n        pred (torch.Tensor): The prediction probability with shape (N, C),\n            C is the number of classes.\n        target (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. 
Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    num_classes = pred.size(1)\n    target = F.one_hot(target, num_classes=num_classes + 1)\n    target = target[:, :num_classes]\n\n    target = target.type_as(pred)\n    pt = (1 - pred) * target + pred * (1 - target)\n    focal_weight = (alpha * target + (1 - alpha) *\n                    (1 - target)) * pt.pow(gamma)\n    loss = F.binary_cross_entropy(\n        pred, target, reduction='none') * focal_weight\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\ndef sigmoid_focal_loss(pred,\n                       target,\n                       weight=None,\n                       gamma=2.0,\n                       alpha=0.25,\n                       reduction='mean',\n                       avg_factor=None):\n    r\"\"\"A wrapper of cuda version `Focal Loss\n    <https://arxiv.org/abs/1708.02002>`_.\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the number\n            of classes.\n        target (torch.Tensor): The learning label of the prediction.\n        weight (torch.Tensor, optional): Sample-wise loss weight.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 0.25.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    # Function.apply does not accept keyword arguments, so the decorator\n    # \"weighted_loss\" is not applicable\n    loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,\n                               alpha, None, 'none')\n    if weight is not None:\n        if weight.shape != loss.shape:\n            if weight.size(0) == loss.size(0):\n                # For most cases, weight is of shape (num_priors, ),\n                #  which means it does not have the second axis num_class\n                weight = weight.view(-1, 1)\n            else:\n                # Sometimes, weight per anchor per class is also needed. e.g.\n                #  in FSAF. 
But it may be flattened of shape\n                #  (num_priors x num_class, ), while loss is still of shape\n                #  (num_priors, num_class).\n                assert weight.numel() == loss.numel()\n                weight = weight.view(loss.size(0), -1)\n        assert weight.ndim == loss.ndim\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@LOSSES.register_module()\nclass FocalLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 gamma=2.0,\n                 alpha=0.25,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 activated=False):\n        \"\"\"`Focal Loss <https://arxiv.org/abs/1708.02002>`_\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction is activated\n                by sigmoid instead of softmax. Defaults to True.\n            gamma (float, optional): The gamma for calculating the modulating\n                factor. Defaults to 2.0.\n            alpha (float, optional): A balanced form for Focal Loss.\n                Defaults to 0.25.\n            reduction (str, optional): The method used to reduce the loss into\n                a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and\n                \"sum\".\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n            activated (bool, optional): Whether the input is activated.\n                If True, it means the input has been activated and can be\n                treated as probabilities. Else, it should be treated as logits.\n                Defaults to False.\n        \"\"\"\n        super(FocalLoss, self).__init__()\n        assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'\n        self.use_sigmoid = use_sigmoid\n        self.gamma = gamma\n        self.alpha = alpha\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.activated = activated\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning label of the prediction.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            if self.activated:\n                calculate_loss_func = py_focal_loss_with_prob\n            else:\n                if torch.cuda.is_available() and pred.is_cuda:\n                    calculate_loss_func = sigmoid_focal_loss\n                else:\n                    num_classes = pred.size(1)\n                    target = F.one_hot(target, num_classes=num_classes + 1)\n                    target = target[:, :num_classes]\n                    calculate_loss_func = py_sigmoid_focal_loss\n\n            loss_cls = self.loss_weight * calculate_loss_func(\n                pred,\n                target,\n                weight,\n                gamma=self.gamma,\n                alpha=self.alpha,\n                reduction=reduction,\n                avg_factor=avg_factor)\n\n        else:\n            raise NotImplementedError\n        return loss_cls\n"
  },
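  {
    "path": "examples/losses/focal_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable; on CPU tensors FocalLoss falls\n# back to the pure PyTorch implementation in focal_loss.py.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\nfocal = build_loss(\n    dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25,\n         loss_weight=1.0))\n\n# Logits for 4 priors over 3 foreground classes; label 3 means background.\ncls_logits = torch.randn(4, 3)\nlabels = torch.tensor([0, 2, 3, 1])\nprint(focal(cls_logits, labels))\n"
  },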
  {
    "path": "mmdet/models/losses/gaussian_focal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch.nn as nn\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):\n    \"\"\"`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian\n    distribution.\n\n    Args:\n        pred (torch.Tensor): The prediction.\n        gaussian_target (torch.Tensor): The learning target of the prediction\n            in gaussian distribution.\n        alpha (float, optional): A balanced form for Focal Loss.\n            Defaults to 2.0.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 4.0.\n    \"\"\"\n    eps = 1e-12\n    pos_weights = gaussian_target.eq(1)\n    neg_weights = (1 - gaussian_target).pow(gamma)\n    pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights\n    neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights\n    return pos_loss + neg_loss\n\n\n@LOSSES.register_module()\nclass GaussianFocalLoss(nn.Module):\n    \"\"\"GaussianFocalLoss is a variant of focal loss.\n\n    More details can be found in the `paper\n    <https://arxiv.org/abs/1808.01244>`_\n    Code is modified from `kp_utils.py\n    <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_  # noqa: E501\n    Please notice that the target in GaussianFocalLoss is a gaussian heatmap,\n    not 0/1 binary target.\n\n    Args:\n        alpha (float): Power of prediction.\n        gamma (float): Power of target for negative samples.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Loss weight of current loss.\n    \"\"\"\n\n    def __init__(self,\n                 alpha=2.0,\n                 gamma=4.0,\n                 reduction='mean',\n                 loss_weight=1.0):\n        super(GaussianFocalLoss, self).__init__()\n        self.alpha = alpha\n        self.gamma = gamma\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction\n                in gaussian distribution.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_reg = self.loss_weight * gaussian_focal_loss(\n            pred,\n            target,\n            weight,\n            alpha=self.alpha,\n            gamma=self.gamma,\n            reduction=reduction,\n            avg_factor=avg_factor)\n        return loss_reg\n"
  },
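  {
    "path": "examples/losses/gaussian_focal_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable. GaussianFocalLoss expects\n# probabilities (e.g. a post-sigmoid heatmap), not logits.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\ncenter_loss = build_loss(dict(type='GaussianFocalLoss', alpha=2.0, gamma=4.0))\n\n# Predicted and target center heatmaps: 2 images, 3 classes, 16x16 cells.\nheatmap_pred = torch.rand(2, 3, 16, 16).clamp(1e-4, 1 - 1e-4)\nheatmap_target = torch.zeros(2, 3, 16, 16)\nheatmap_target[0, 1, 8, 8] = 1.0  # a single peak stands in for a gaussian\nprint(center_loss(heatmap_pred, heatmap_target))\n"
  },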
  {
    "path": "mmdet/models/losses/gfocal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef quality_focal_loss(pred, target, beta=2.0):\n    r\"\"\"Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted joint representation of classification\n            and quality (IoU) estimation with shape (N, C), C is the number of\n            classes.\n        target (tuple([torch.Tensor])): Target category label with shape (N,)\n            and target quality label with shape (N,).\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert len(target) == 2, \"\"\"target for QFL must be a tuple of two elements,\n        including category label and quality label, respectively\"\"\"\n    # label denotes the category id, score denotes the quality score\n    label, score = target\n\n    # negatives are supervised by 0 quality score\n    pred_sigmoid = pred.sigmoid()\n    scale_factor = pred_sigmoid\n    zerolabel = scale_factor.new_zeros(pred.shape)\n    loss = F.binary_cross_entropy_with_logits(\n        pred, zerolabel, reduction='none') * scale_factor.pow(beta)\n\n    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\n    bg_class_ind = pred.size(1)\n    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)\n    pos_label = label[pos].long()\n    # positives are supervised by bbox quality (IoU) score\n    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]\n    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(\n        pred[pos, pos_label], score[pos],\n        reduction='none') * scale_factor.abs().pow(beta)\n\n    loss = loss.sum(dim=1, keepdim=False)\n    return loss\n\n\n@weighted_loss\ndef quality_focal_loss_with_prob(pred, target, beta=2.0):\n    r\"\"\"Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n    Different from `quality_focal_loss`, this function accepts probability\n    as input.\n\n    Args:\n        pred (torch.Tensor): Predicted joint representation of classification\n            and quality (IoU) estimation with shape (N, C), C is the number of\n            classes.\n        target (tuple([torch.Tensor])): Target category label with shape (N,)\n            and target quality label with shape (N,).\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert len(target) == 2, \"\"\"target for QFL must be a tuple of two elements,\n        including category label and quality label, respectively\"\"\"\n    # label denotes the category id, score denotes the quality score\n    label, score = target\n\n    # negatives are supervised by 0 quality score\n    pred_sigmoid = pred\n    scale_factor = pred_sigmoid\n    zerolabel = scale_factor.new_zeros(pred.shape)\n    loss = F.binary_cross_entropy(\n        pred, zerolabel, reduction='none') * scale_factor.pow(beta)\n\n    # FG cat_id: [0, 
num_classes -1], BG cat_id: num_classes\n    bg_class_ind = pred.size(1)\n    pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)\n    pos_label = label[pos].long()\n    # positives are supervised by bbox quality (IoU) score\n    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]\n    loss[pos, pos_label] = F.binary_cross_entropy(\n        pred[pos, pos_label], score[pos],\n        reduction='none') * scale_factor.abs().pow(beta)\n\n    loss = loss.sum(dim=1, keepdim=False)\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef distribution_focal_loss(pred, label):\n    r\"\"\"Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning\n    Qualified and Distributed Bounding Boxes for Dense Object Detection\n    <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted general distribution of bounding boxes\n            (before softmax) with shape (N, n+1), n is the max value of the\n            integral set `{0, ..., n}` in paper.\n        label (torch.Tensor): Target distance label for bounding boxes with\n            shape (N,).\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    dis_left = label.long()\n    dis_right = dis_left + 1\n    weight_left = dis_right.float() - label\n    weight_right = label - dis_left.float()\n    loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \\\n        + F.cross_entropy(pred, dis_right, reduction='none') * weight_right\n    return loss\n\n\n@LOSSES.register_module()\nclass QualityFocalLoss(nn.Module):\n    r\"\"\"Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:\n    Learning Qualified and Distributed Bounding Boxes for Dense Object\n    Detection <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.\n            Defaults to True.\n        beta (float): The beta parameter for calculating the modulating factor.\n            Defaults to 2.0.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Loss weight of current loss.\n        activated (bool, optional): Whether the input is activated.\n            If True, it means the input has been activated and can be\n            treated as probabilities. 
Else, it should be treated as logits.\n            Defaults to False.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 beta=2.0,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 activated=False):\n        super(QualityFocalLoss, self).__init__()\n        assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'\n        self.use_sigmoid = use_sigmoid\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.activated = activated\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): Predicted joint representation of\n                classification and quality (IoU) estimation with shape (N, C),\n                C is the number of classes.\n            target (tuple([torch.Tensor])): Target category label with shape\n                (N,) and target quality label with shape (N,).\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            if self.activated:\n                calculate_loss_func = quality_focal_loss_with_prob\n            else:\n                calculate_loss_func = quality_focal_loss\n            loss_cls = self.loss_weight * calculate_loss_func(\n                pred,\n                target,\n                weight,\n                beta=self.beta,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            raise NotImplementedError\n        return loss_cls\n\n\n@LOSSES.register_module()\nclass DistributionFocalLoss(nn.Module):\n    r\"\"\"Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:\n    Learning Qualified and Distributed Bounding Boxes for Dense Object\n    Detection <https://arxiv.org/abs/2006.04388>`_.\n\n    Args:\n        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.\n        loss_weight (float): Loss weight of current loss.\n    \"\"\"\n\n    def __init__(self, reduction='mean', loss_weight=1.0):\n        super(DistributionFocalLoss, self).__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): Predicted general distribution of bounding\n                boxes (before softmax) with shape (N, n+1), n is the max value\n                of the integral set `{0, ..., n}` in paper.\n            target (torch.Tensor): Target distance label for bounding boxes\n                with shape (N,).\n            weight (torch.Tensor, optional): The weight of loss for each\n    
            prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_cls = self.loss_weight * distribution_focal_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss_cls\n"
  },
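  {
    "path": "examples/losses/gfocal_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable. QFL takes a (label, score) tuple\n# as target; DFL takes continuous distances inside the integral range.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\nqfl = build_loss(dict(type='QualityFocalLoss', use_sigmoid=True, beta=2.0))\ndfl = build_loss(dict(type='DistributionFocalLoss', loss_weight=0.25))\n\n# Joint classification-quality logits for 6 priors and 4 classes.\ncls_logits = torch.randn(6, 4)\nlabels = torch.tensor([0, 3, 4, 4, 2, 1])  # 4 == background for 4 classes\nquality = torch.rand(6)                    # IoU targets for the positives\nprint(qfl(cls_logits, (labels, quality)))\n\n# General distribution over the integral set {0, ..., 7} and soft targets.\ndist_logits = torch.randn(6, 8)\ndist_targets = torch.rand(6) * 7\nprint(dfl(dist_logits, dist_targets))\n"
  },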
  {
    "path": "mmdet/models/losses/ghm_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\ndef _expand_onehot_labels(labels, label_weights, label_channels):\n    bin_labels = labels.new_full((labels.size(0), label_channels), 0)\n    inds = torch.nonzero(\n        (labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()\n    if inds.numel() > 0:\n        bin_labels[inds, labels[inds]] = 1\n    bin_label_weights = label_weights.view(-1, 1).expand(\n        label_weights.size(0), label_channels)\n    return bin_labels, bin_label_weights\n\n\n# TODO: code refactoring to make it consistent with other losses\n@LOSSES.register_module()\nclass GHMC(nn.Module):\n    \"\"\"GHM Classification Loss.\n\n    Details of the theorem can be viewed in the paper\n    `Gradient Harmonized Single-stage Detector\n    <https://arxiv.org/abs/1811.05181>`_.\n\n    Args:\n        bins (int): Number of the unit regions for distribution calculation.\n        momentum (float): The parameter for moving average.\n        use_sigmoid (bool): Can only be true for BCE based loss now.\n        loss_weight (float): The weight of the total GHM-C loss.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to \"mean\"\n    \"\"\"\n\n    def __init__(self,\n                 bins=10,\n                 momentum=0,\n                 use_sigmoid=True,\n                 loss_weight=1.0,\n                 reduction='mean'):\n        super(GHMC, self).__init__()\n        self.bins = bins\n        self.momentum = momentum\n        edges = torch.arange(bins + 1).float() / bins\n        self.register_buffer('edges', edges)\n        self.edges[-1] += 1e-6\n        if momentum > 0:\n            acc_sum = torch.zeros(bins)\n            self.register_buffer('acc_sum', acc_sum)\n        self.use_sigmoid = use_sigmoid\n        if not self.use_sigmoid:\n            raise NotImplementedError\n        self.loss_weight = loss_weight\n        self.reduction = reduction\n\n    def forward(self,\n                pred,\n                target,\n                label_weight,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Calculate the GHM-C loss.\n\n        Args:\n            pred (float tensor of size [batch_num, class_num]):\n                The direct prediction of classification fc layer.\n            target (float tensor of size [batch_num, class_num]):\n                Binary class target for each sample.\n            label_weight (float tensor of size [batch_num, class_num]):\n                the value is 1 if the sample is valid and 0 if ignored.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        Returns:\n            The gradient harmonized loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        # the target should be binary class label\n        if pred.dim() != target.dim():\n            target, label_weight = _expand_onehot_labels(\n                target, label_weight, pred.size(-1))\n        target, label_weight = target.float(), label_weight.float()\n        edges = self.edges\n        mmt = self.momentum\n        weights = torch.zeros_like(pred)\n\n        # 
gradient length\n        g = torch.abs(pred.sigmoid().detach() - target)\n\n        valid = label_weight > 0\n        tot = max(valid.float().sum().item(), 1.0)\n        n = 0  # n valid bins\n        for i in range(self.bins):\n            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid\n            num_in_bin = inds.sum().item()\n            if num_in_bin > 0:\n                if mmt > 0:\n                    self.acc_sum[i] = mmt * self.acc_sum[i] \\\n                        + (1 - mmt) * num_in_bin\n                    weights[inds] = tot / self.acc_sum[i]\n                else:\n                    weights[inds] = tot / num_in_bin\n                n += 1\n        if n > 0:\n            weights = weights / n\n\n        loss = F.binary_cross_entropy_with_logits(\n            pred, target, reduction='none')\n        loss = weight_reduce_loss(\n            loss, weights, reduction=reduction, avg_factor=tot)\n        return loss * self.loss_weight\n\n\n# TODO: code refactoring to make it consistent with other losses\n@LOSSES.register_module()\nclass GHMR(nn.Module):\n    \"\"\"GHM Regression Loss.\n\n    Details of the theorem can be viewed in the paper\n    `Gradient Harmonized Single-stage Detector\n    <https://arxiv.org/abs/1811.05181>`_.\n\n    Args:\n        mu (float): The parameter for the Authentic Smooth L1 loss.\n        bins (int): Number of the unit regions for distribution calculation.\n        momentum (float): The parameter for moving average.\n        loss_weight (float): The weight of the total GHM-R loss.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n            Defaults to \"mean\"\n    \"\"\"\n\n    def __init__(self,\n                 mu=0.02,\n                 bins=10,\n                 momentum=0,\n                 loss_weight=1.0,\n                 reduction='mean'):\n        super(GHMR, self).__init__()\n        self.mu = mu\n        self.bins = bins\n        edges = torch.arange(bins + 1).float() / bins\n        self.register_buffer('edges', edges)\n        self.edges[-1] = 1e3\n        self.momentum = momentum\n        if momentum > 0:\n            acc_sum = torch.zeros(bins)\n            self.register_buffer('acc_sum', acc_sum)\n        self.loss_weight = loss_weight\n        self.reduction = reduction\n\n    # TODO: support reduction parameter\n    def forward(self,\n                pred,\n                target,\n                label_weight,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Calculate the GHM-R loss.\n\n        Args:\n            pred (float tensor of size [batch_num, 4 (* class_num)]):\n                The prediction of box regression layer. 
Channel number can be 4\n                or 4 * class_num depending on whether it is class-agnostic.\n            target (float tensor of size [batch_num, 4 (* class_num)]):\n                The target regression values with the same size of pred.\n            label_weight (float tensor of size [batch_num, 4 (* class_num)]):\n                The weight of each sample, 0 if ignored.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        Returns:\n            The gradient harmonized loss.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        mu = self.mu\n        edges = self.edges\n        mmt = self.momentum\n\n        # ASL1 loss\n        diff = pred - target\n        loss = torch.sqrt(diff * diff + mu * mu) - mu\n\n        # gradient length\n        g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()\n        weights = torch.zeros_like(g)\n\n        valid = label_weight > 0\n        tot = max(label_weight.float().sum().item(), 1.0)\n        n = 0  # n: valid bins\n        for i in range(self.bins):\n            inds = (g >= edges[i]) & (g < edges[i + 1]) & valid\n            num_in_bin = inds.sum().item()\n            if num_in_bin > 0:\n                n += 1\n                if mmt > 0:\n                    self.acc_sum[i] = mmt * self.acc_sum[i] \\\n                        + (1 - mmt) * num_in_bin\n                    weights[inds] = tot / self.acc_sum[i]\n                else:\n                    weights[inds] = tot / num_in_bin\n        if n > 0:\n            weights /= n\n        loss = weight_reduce_loss(\n            loss, weights, reduction=reduction, avg_factor=tot)\n        return loss * self.loss_weight\n"
  },
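  {
    "path": "examples/losses/ghm_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable. GHMC takes an explicit\n# label_weight tensor marking valid samples instead of the usual `weight`.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\nghmc = build_loss(\n    dict(type='GHMC', bins=10, momentum=0.75, use_sigmoid=True,\n         loss_weight=1.0))\n\n# Logits for 5 samples over 3 classes; label 3 means background, so the\n# integer labels are expanded to one-hot targets inside the loss.\ncls_logits = torch.randn(5, 3)\nlabels = torch.tensor([0, 2, 3, 1, 3])\nlabel_weight = torch.ones(5)  # 1 marks a valid sample, 0 an ignored one\nprint(ghmc(cls_logits, labels, label_weight))\n"
  },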
  {
    "path": "mmdet/models/losses/iou_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\n\nimport mmcv\nimport torch\nimport torch.nn as nn\n\nfrom mmdet.core import bbox_overlaps\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef iou_loss(pred, target, linear=False, mode='log', eps=1e-6):\n    \"\"\"IoU loss.\n\n    Computing the IoU loss between a set of predicted bboxes and target bboxes.\n    The loss is calculated as negative log of IoU.\n\n    Args:\n        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).\n        linear (bool, optional): If True, use linear scale of loss instead of\n            log scale. Default: False.\n        mode (str): Loss scaling mode, including \"linear\", \"square\", and \"log\".\n            Default: 'log'\n        eps (float): Eps to avoid log(0).\n\n    Return:\n        torch.Tensor: Loss tensor.\n    \"\"\"\n    assert mode in ['linear', 'square', 'log']\n    if linear:\n        mode = 'linear'\n        warnings.warn('DeprecationWarning: Setting \"linear=True\" in '\n                      'iou_loss is deprecated, please use \"mode=`linear`\" '\n                      'instead.')\n    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)\n    if mode == 'linear':\n        loss = 1 - ious\n    elif mode == 'square':\n        loss = 1 - ious**2\n    elif mode == 'log':\n        loss = -ious.log()\n    else:\n        raise NotImplementedError\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):\n    \"\"\"BIoULoss.\n\n    This is an implementation of paper\n    `Improving Object Localization with Fitness NMS and Bounded IoU Loss.\n    <https://arxiv.org/abs/1711.00164>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted bboxes.\n        target (torch.Tensor): Target bboxes.\n        beta (float): beta parameter in smoothl1.\n        eps (float): eps to avoid NaN.\n    \"\"\"\n    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5\n    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5\n    pred_w = pred[:, 2] - pred[:, 0]\n    pred_h = pred[:, 3] - pred[:, 1]\n    with torch.no_grad():\n        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5\n        target_ctry = (target[:, 1] + target[:, 3]) * 0.5\n        target_w = target[:, 2] - target[:, 0]\n        target_h = target[:, 3] - target[:, 1]\n\n    dx = target_ctrx - pred_ctrx\n    dy = target_ctry - pred_ctry\n\n    loss_dx = 1 - torch.max(\n        (target_w - 2 * dx.abs()) /\n        (target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))\n    loss_dy = 1 - torch.max(\n        (target_h - 2 * dy.abs()) /\n        (target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))\n    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /\n                            (target_w + eps))\n    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /\n                            (target_h + eps))\n    # view(..., -1) does not work for empty tensor\n    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],\n                            dim=-1).flatten(1)\n\n    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,\n                       loss_comb - 0.5 * beta)\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef giou_loss(pred, target, eps=1e-7):\n    r\"\"\"`Generalized 
Intersection over Union: A Metric and A Loss for Bounding\n    Box Regression <https://arxiv.org/abs/1902.09630>`_.\n\n    Args:\n        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Eps to avoid log(0).\n\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)\n    loss = 1 - gious\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef diou_loss(pred, target, eps=1e-7):\n    r\"\"\"`Implementation of Distance-IoU Loss: Faster and Better\n    Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.\n\n    Code is modified from https://github.com/Zzh-tju/DIoU.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Eps to avoid log(0).\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    # DIoU\n    dious = ious - rho2 / c2\n    loss = 1 - dious\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef ciou_loss(pred, target, eps=1e-7):\n    r\"\"\"`Implementation of paper `Enhancing Geometric Factors into\n    Model Learning and Inference for Object Detection and Instance\n    Segmentation <https://arxiv.org/abs/2005.03572>`_.\n\n    Code is modified from https://github.com/Zzh-tju/CIoU.\n\n    Args:\n        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),\n            shape (n, 4).\n        target (Tensor): Corresponding gt bboxes, shape (n, 4).\n        eps (float): Eps to avoid log(0).\n    Return:\n        Tensor: Loss tensor.\n    \"\"\"\n    # overlap\n    lt = torch.max(pred[:, :2], target[:, :2])\n    rb = torch.min(pred[:, 2:], target[:, 2:])\n    wh = (rb - lt).clamp(min=0)\n    overlap = wh[:, 0] * wh[:, 1]\n\n    # union\n    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])\n    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])\n    union = ap + ag - overlap + eps\n\n    # IoU\n    ious = overlap / union\n\n    # enclose area\n    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])\n    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])\n    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)\n\n    cw = enclose_wh[:, 0]\n    ch = enclose_wh[:, 1]\n\n    c2 = cw**2 + ch**2 + eps\n\n    b1_x1, b1_y1 = 
pred[:, 0], pred[:, 1]\n    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]\n    b2_x1, b2_y1 = target[:, 0], target[:, 1]\n    b2_x2, b2_y2 = target[:, 2], target[:, 3]\n\n    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps\n    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps\n\n    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4\n    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4\n    rho2 = left + right\n\n    factor = 4 / math.pi**2\n    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)\n\n    with torch.no_grad():\n        alpha = (ious > 0.5).float() * v / (1 - ious + v)\n\n    # CIoU\n    cious = ious - (rho2 / c2 + alpha * v)\n    loss = 1 - cious.clamp(min=-1.0, max=1.0)\n    return loss\n\n\n@LOSSES.register_module()\nclass IoULoss(nn.Module):\n    \"\"\"IoULoss.\n\n    Computing the IoU loss between a set of predicted bboxes and target bboxes.\n\n    Args:\n        linear (bool): If True, use linear scale of loss else determined\n            by mode. Default: False.\n        eps (float): Eps to avoid log(0).\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float): Weight of loss.\n        mode (str): Loss scaling mode, including \"linear\", \"square\", and \"log\".\n            Default: 'log'\n    \"\"\"\n\n    def __init__(self,\n                 linear=False,\n                 eps=1e-6,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 mode='log'):\n        super(IoULoss, self).__init__()\n        assert mode in ['linear', 'square', 'log']\n        if linear:\n            mode = 'linear'\n            warnings.warn('DeprecationWarning: Setting \"linear=True\" in '\n                          'IOULoss is deprecated, please use \"mode=`linear`\" '\n                          'instead.')\n        self.mode = mode\n        self.linear = linear\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None. 
Options are \"none\", \"mean\" and \"sum\".\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if (weight is not None) and (not torch.any(weight > 0)) and (\n                reduction != 'none'):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # iou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * iou_loss(\n            pred,\n            target,\n            weight,\n            mode=self.mode,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@LOSSES.register_module()\nclass BoundedIoULoss(nn.Module):\n\n    def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):\n        super(BoundedIoULoss, self).__init__()\n        self.beta = beta\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss = self.loss_weight * bounded_iou_loss(\n            pred,\n            target,\n            weight,\n            beta=self.beta,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@LOSSES.register_module()\nclass GIoULoss(nn.Module):\n\n    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):\n        super(GIoULoss, self).__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * giou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            
avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@LOSSES.register_module()\nclass DIoULoss(nn.Module):\n\n    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):\n        super(DIoULoss, self).__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * diou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n\n\n@LOSSES.register_module()\nclass CIoULoss(nn.Module):\n\n    def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):\n        super(CIoULoss, self).__init__()\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        if weight is not None and not torch.any(weight > 0):\n            if pred.dim() == weight.dim() + 1:\n                weight = weight.unsqueeze(1)\n            return (pred * weight).sum()  # 0\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if weight is not None and weight.dim() > 1:\n            # TODO: remove this in the future\n            # reduce the weight of shape (n, 4) to (n,) to match the\n            # giou_loss of shape (n,)\n            assert weight.shape == pred.shape\n            weight = weight.mean(-1)\n        loss = self.loss_weight * ciou_loss(\n            pred,\n            target,\n            weight,\n            eps=self.eps,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss\n"
  },
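  {
    "path": "examples/losses/iou_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable. The IoU-based losses here take\n# decoded boxes in (x1, y1, x2, y2) format.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\ngiou = build_loss(dict(type='GIoULoss', loss_weight=2.0))\n\npred_boxes = torch.tensor([[10., 10., 50., 60.],\n                           [20., 20., 40., 40.]])\ngt_boxes = torch.tensor([[12., 8., 48., 62.],\n                         [0., 0., 10., 10.]])\nprint(giou(pred_boxes, gt_boxes))\n\n# Per-coordinate weights of shape (n, 4) are averaged to (n,) inside.\nbox_weights = torch.ones(2, 4)\nprint(giou(pred_boxes, gt_boxes, box_weights, avg_factor=2))\n"
  },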
  {
    "path": "mmdet/models/losses/kd_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef knowledge_distillation_kl_div_loss(pred,\n                                       soft_label,\n                                       T,\n                                       detach_target=True):\n    r\"\"\"Loss function for knowledge distilling using KL divergence.\n\n    Args:\n        pred (Tensor): Predicted logits with shape (N, n + 1).\n        soft_label (Tensor): Target logits with shape (N, N + 1).\n        T (int): Temperature for distillation.\n        detach_target (bool): Remove soft_label from automatic differentiation\n\n    Returns:\n        torch.Tensor: Loss tensor with shape (N,).\n    \"\"\"\n    assert pred.size() == soft_label.size()\n    target = F.softmax(soft_label / T, dim=1)\n    if detach_target:\n        target = target.detach()\n\n    kd_loss = F.kl_div(\n        F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (\n            T * T)\n\n    return kd_loss\n\n\n@LOSSES.register_module()\nclass KnowledgeDistillationKLDivLoss(nn.Module):\n    \"\"\"Loss function for knowledge distilling using KL divergence.\n\n    Args:\n        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.\n        loss_weight (float): Loss weight of current loss.\n        T (int): Temperature for distillation.\n    \"\"\"\n\n    def __init__(self, reduction='mean', loss_weight=1.0, T=10):\n        super(KnowledgeDistillationKLDivLoss, self).__init__()\n        assert T >= 1\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.T = T\n\n    def forward(self,\n                pred,\n                soft_label,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (Tensor): Predicted logits with shape (N, n + 1).\n            soft_label (Tensor): Target logits with shape (N, N + 1).\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n\n        loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(\n            pred,\n            soft_label,\n            weight,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            T=self.T)\n\n        return loss_kd\n"
  },
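  {
    "path": "examples/losses/kd_loss_example.py",
    "content": "# Illustrative usage sketch, not part of the upstream code base.\n# Assumes torch and mmdet are importable. Both inputs are raw logits; the\n# teacher logits are softened with temperature T inside the loss.\nimport torch\n\nfrom mmdet.models.builder import build_loss\n\nkd = build_loss(\n    dict(type='KnowledgeDistillationKLDivLoss', T=10, loss_weight=0.5))\n\nstudent_logits = torch.randn(8, 21)\nteacher_logits = torch.randn(8, 21)\nprint(kd(student_logits, teacher_logits))\n"
  },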
  {
    "path": "mmdet/models/losses/mse_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@weighted_loss\ndef mse_loss(pred, target):\n    \"\"\"Wrapper of mse loss.\"\"\"\n    return F.mse_loss(pred, target, reduction='none')\n\n\n@LOSSES.register_module()\nclass MSELoss(nn.Module):\n    \"\"\"MSELoss.\n\n    Args:\n        reduction (str, optional): The method that reduces the loss to a\n            scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n    \"\"\"\n\n    def __init__(self, reduction='mean', loss_weight=1.0):\n        super().__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function of loss.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction.\n            weight (torch.Tensor, optional): Weight of the loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss = self.loss_weight * mse_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss\n"
  },
  {
    "path": "mmdet/models/losses/pisa_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\n\nfrom mmdet.core import bbox_overlaps\n\n\n@mmcv.jit(derivate=True, coderize=True)\ndef isr_p(cls_score,\n          bbox_pred,\n          bbox_targets,\n          rois,\n          sampling_results,\n          loss_cls,\n          bbox_coder,\n          k=2,\n          bias=0,\n          num_class=80):\n    \"\"\"Importance-based Sample Reweighting (ISR_P), positive part.\n\n    Args:\n        cls_score (Tensor): Predicted classification scores.\n        bbox_pred (Tensor): Predicted bbox deltas.\n        bbox_targets (tuple[Tensor]): A tuple of bbox targets, the are\n            labels, label_weights, bbox_targets, bbox_weights, respectively.\n        rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs\n            (two_stage) in shape (n, 5).\n        sampling_results (obj): Sampling results.\n        loss_cls (func): Classification loss func of the head.\n        bbox_coder (obj): BBox coder of the head.\n        k (float): Power of the non-linear mapping.\n        bias (float): Shift of the non-linear mapping.\n        num_class (int): Number of classes, default: 80.\n\n    Return:\n        tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,\n            bbox_target_weights\n    \"\"\"\n\n    labels, label_weights, bbox_targets, bbox_weights = bbox_targets\n    pos_label_inds = ((labels >= 0) &\n                      (labels < num_class)).nonzero().reshape(-1)\n    pos_labels = labels[pos_label_inds]\n\n    # if no positive samples, return the original targets\n    num_pos = float(pos_label_inds.size(0))\n    if num_pos == 0:\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    # merge pos_assigned_gt_inds of per image to a single tensor\n    gts = list()\n    last_max_gt = 0\n    for i in range(len(sampling_results)):\n        gt_i = sampling_results[i].pos_assigned_gt_inds\n        gts.append(gt_i + last_max_gt)\n        if len(gt_i) != 0:\n            last_max_gt = gt_i.max() + 1\n    gts = torch.cat(gts)\n    assert len(gts) == num_pos\n\n    cls_score = cls_score.detach()\n    bbox_pred = bbox_pred.detach()\n\n    # For single stage detectors, rois here indicate anchors, in shape (N, 4)\n    # For two stage detectors, rois are in shape (N, 5)\n    if rois.size(-1) == 5:\n        pos_rois = rois[pos_label_inds][:, 1:]\n    else:\n        pos_rois = rois[pos_label_inds]\n\n    if bbox_pred.size(-1) > 4:\n        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)\n        pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)\n    else:\n        pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)\n\n    # compute iou of the predicted bbox and the corresponding GT\n    pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)\n    pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)\n    target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)\n    ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)\n\n    pos_imp_weights = label_weights[pos_label_inds]\n    # Two steps to compute IoU-HLR. 
Samples are first sorted by IoU locally,\n    # then sorted again within the same-rank group\n    max_l_num = pos_labels.bincount().max()\n    for label in pos_labels.unique():\n        l_inds = (pos_labels == label).nonzero().view(-1)\n        l_gts = gts[l_inds]\n        for t in l_gts.unique():\n            t_inds = l_inds[l_gts == t]\n            t_ious = ious[t_inds]\n            _, t_iou_rank_idx = t_ious.sort(descending=True)\n            _, t_iou_rank = t_iou_rank_idx.sort()\n            ious[t_inds] += max_l_num - t_iou_rank.float()\n        l_ious = ious[l_inds]\n        _, l_iou_rank_idx = l_ious.sort(descending=True)\n        _, l_iou_rank = l_iou_rank_idx.sort()  # IoU-HLR\n        # linearly map HLR to label weights\n        pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num\n\n    pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)\n\n    # normalize to make the new weighted loss value equal to the original loss\n    pos_loss_cls = loss_cls(\n        cls_score[pos_label_inds], pos_labels, reduction_override='none')\n    if pos_loss_cls.dim() > 1:\n        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,\n                                                                        None]\n        new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]\n    else:\n        ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]\n        new_pos_loss_cls = pos_loss_cls * pos_imp_weights\n    pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()\n    pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio\n    label_weights[pos_label_inds] = pos_imp_weights\n\n    bbox_targets = labels, label_weights, bbox_targets, bbox_weights\n    return bbox_targets\n\n\n@mmcv.jit(derivate=True, coderize=True)\ndef carl_loss(cls_score,\n              labels,\n              bbox_pred,\n              bbox_targets,\n              loss_bbox,\n              k=1,\n              bias=0.2,\n              avg_factor=None,\n              sigmoid=False,\n              num_class=80):\n    \"\"\"Classification-Aware Regression Loss (CARL).\n\n    Args:\n        cls_score (Tensor): Predicted classification scores.\n        labels (Tensor): Targets of classification.\n        bbox_pred (Tensor): Predicted bbox deltas.\n        bbox_targets (Tensor): Target of bbox regression.\n        loss_bbox (func): Regression loss func of the head.\n        bbox_coder (obj): BBox coder of the head.\n        k (float): Power of the non-linear mapping.\n        bias (float): Shift of the non-linear mapping.\n        avg_factor (int): Average factor used in regression loss.\n        sigmoid (bool): Activation of the classification score.\n        num_class (int): Number of classes, default: 80.\n\n    Return:\n        dict: CARL loss dict.\n    \"\"\"\n    pos_label_inds = ((labels >= 0) &\n                      (labels < num_class)).nonzero().reshape(-1)\n    if pos_label_inds.numel() == 0:\n        return dict(loss_carl=cls_score.sum()[None] * 0.)\n    pos_labels = labels[pos_label_inds]\n\n    # multiply pos_cls_score with the corresponding bbox weight\n    # and remain gradient\n    if sigmoid:\n        pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]\n    else:\n        pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]\n    carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)\n\n    # normalize carl_loss_weight to make its sum equal to num positive\n    num_pos = float(pos_cls_score.size(0))\n    weight_ratio = 
num_pos / carl_loss_weights.sum()\n    carl_loss_weights *= weight_ratio\n\n    if avg_factor is None:\n        avg_factor = bbox_targets.size(0)\n    # if is class agnostic, bbox pred is in shape (N, 4)\n    # otherwise, bbox pred is in shape (N, #classes, 4)\n    if bbox_pred.size(-1) > 4:\n        bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)\n        pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]\n    else:\n        pos_bbox_preds = bbox_pred[pos_label_inds]\n    ori_loss_reg = loss_bbox(\n        pos_bbox_preds,\n        bbox_targets[pos_label_inds],\n        reduction_override='none') / avg_factor\n    loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()\n    return dict(loss_carl=loss_carl[None])\n"
  },
  {
    "path": "mmdet/models/losses/seesaw_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .accuracy import accuracy\nfrom .cross_entropy_loss import cross_entropy\nfrom .utils import weight_reduce_loss\n\n\ndef seesaw_ce_loss(cls_score,\n                   labels,\n                   label_weights,\n                   cum_samples,\n                   num_classes,\n                   p,\n                   q,\n                   eps,\n                   reduction='mean',\n                   avg_factor=None):\n    \"\"\"Calculate the Seesaw CrossEntropy loss.\n\n    Args:\n        cls_score (torch.Tensor): The prediction with shape (N, C),\n             C is the number of classes.\n        labels (torch.Tensor): The learning label of the prediction.\n        label_weights (torch.Tensor): Sample-wise loss weight.\n        cum_samples (torch.Tensor): Cumulative samples for each category.\n        num_classes (int): The number of classes.\n        p (float): The ``p`` in the mitigation factor.\n        q (float): The ``q`` in the compenstation factor.\n        eps (float): The minimal value of divisor to smooth\n             the computation of compensation factor\n        reduction (str, optional): The method used to reduce the loss.\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n\n    Returns:\n        torch.Tensor: The calculated loss\n    \"\"\"\n    assert cls_score.size(-1) == num_classes\n    assert len(cum_samples) == num_classes\n\n    onehot_labels = F.one_hot(labels, num_classes)\n    seesaw_weights = cls_score.new_ones(onehot_labels.size())\n\n    # mitigation factor\n    if p > 0:\n        sample_ratio_matrix = cum_samples[None, :].clamp(\n            min=1) / cum_samples[:, None].clamp(min=1)\n        index = (sample_ratio_matrix < 1.0).float()\n        sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)\n        mitigation_factor = sample_weights[labels.long(), :]\n        seesaw_weights = seesaw_weights * mitigation_factor\n\n    # compensation factor\n    if q > 0:\n        scores = F.softmax(cls_score.detach(), dim=1)\n        self_scores = scores[\n            torch.arange(0, len(scores)).to(scores.device).long(),\n            labels.long()]\n        score_matrix = scores / self_scores[:, None].clamp(min=eps)\n        index = (score_matrix > 1.0).float()\n        compensation_factor = score_matrix.pow(q) * index + (1 - index)\n        seesaw_weights = seesaw_weights * compensation_factor\n\n    cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))\n\n    loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')\n\n    if label_weights is not None:\n        label_weights = label_weights.float()\n    loss = weight_reduce_loss(\n        loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)\n    return loss\n\n\n@LOSSES.register_module()\nclass SeesawLoss(nn.Module):\n    \"\"\"\n    Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)\n    arXiv: https://arxiv.org/abs/2008.10032\n\n    Args:\n        use_sigmoid (bool, optional): Whether the prediction uses sigmoid\n             of softmax. 
Only False is supported.\n        p (float, optional): The ``p`` in the mitigation factor.\n             Defaults to 0.8.\n        q (float, optional): The ``q`` in the compenstation factor.\n             Defaults to 2.0.\n        num_classes (int, optional): The number of classes.\n             Default to 1203 for LVIS v1 dataset.\n        eps (float, optional): The minimal value of divisor to smooth\n             the computation of compensation factor\n        reduction (str, optional): The method that reduces the loss to a\n             scalar. Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of the loss. Defaults to 1.0\n        return_dict (bool, optional): Whether return the losses as a dict.\n             Default to True.\n    \"\"\"\n\n    def __init__(self,\n                 use_sigmoid=False,\n                 p=0.8,\n                 q=2.0,\n                 num_classes=1203,\n                 eps=1e-2,\n                 reduction='mean',\n                 loss_weight=1.0,\n                 return_dict=True):\n        super(SeesawLoss, self).__init__()\n        assert not use_sigmoid\n        self.use_sigmoid = False\n        self.p = p\n        self.q = q\n        self.num_classes = num_classes\n        self.eps = eps\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n        self.return_dict = return_dict\n\n        # 0 for pos, 1 for neg\n        self.cls_criterion = seesaw_ce_loss\n\n        # cumulative samples for each category\n        self.register_buffer(\n            'cum_samples',\n            torch.zeros(self.num_classes + 1, dtype=torch.float))\n\n        # custom output channels of the classifier\n        self.custom_cls_channels = True\n        # custom activation of cls_score\n        self.custom_activation = True\n        # custom accuracy of the classsifier\n        self.custom_accuracy = True\n\n    def _split_cls_score(self, cls_score):\n        # split cls_score to cls_score_classes and cls_score_objectness\n        assert cls_score.size(-1) == self.num_classes + 2\n        cls_score_classes = cls_score[..., :-2]\n        cls_score_objectness = cls_score[..., -2:]\n        return cls_score_classes, cls_score_objectness\n\n    def get_cls_channels(self, num_classes):\n        \"\"\"Get custom classification channels.\n\n        Args:\n            num_classes (int): The number of classes.\n\n        Returns:\n            int: The custom classification channels.\n        \"\"\"\n        assert num_classes == self.num_classes\n        return num_classes + 2\n\n    def get_activation(self, cls_score):\n        \"\"\"Get custom activation of cls_score.\n\n        Args:\n            cls_score (torch.Tensor): The prediction with shape (N, C + 2).\n\n        Returns:\n            torch.Tensor: The custom activation of cls_score with shape\n                 (N, C + 1).\n        \"\"\"\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        score_classes = F.softmax(cls_score_classes, dim=-1)\n        score_objectness = F.softmax(cls_score_objectness, dim=-1)\n        score_pos = score_objectness[..., [0]]\n        score_neg = score_objectness[..., [1]]\n        score_classes = score_classes * score_pos\n        scores = torch.cat([score_classes, score_neg], dim=-1)\n        return scores\n\n    def get_accuracy(self, cls_score, labels):\n        \"\"\"Get custom accuracy w.r.t. 
cls_score and labels.\n\n        Args:\n            cls_score (torch.Tensor): The prediction with shape (N, C + 2).\n            labels (torch.Tensor): The learning label of the prediction.\n\n        Returns:\n            Dict [str, torch.Tensor]: The accuracy for objectness and classes,\n                 respectively.\n        \"\"\"\n        pos_inds = labels < self.num_classes\n        obj_labels = (labels == self.num_classes).long()\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        acc_objectness = accuracy(cls_score_objectness, obj_labels)\n        acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])\n        acc = dict()\n        acc['acc_objectness'] = acc_objectness\n        acc['acc_classes'] = acc_classes\n        return acc\n\n    def forward(self,\n                cls_score,\n                labels,\n                label_weights=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            cls_score (torch.Tensor): The prediction with shape (N, C + 2).\n            labels (torch.Tensor): The learning label of the prediction.\n            label_weights (torch.Tensor, optional): Sample-wise loss weight.\n            avg_factor (int, optional): Average factor that is used to average\n                 the loss. Defaults to None.\n            reduction (str, optional): The method used to reduce the loss.\n                 Options are \"none\", \"mean\" and \"sum\".\n        Returns:\n            torch.Tensor | Dict [str, torch.Tensor]:\n                 if return_dict == False: The calculated loss |\n                 if return_dict == True: The dict of calculated losses\n                 for objectness and classes, respectively.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        assert cls_score.size(-1) == self.num_classes + 2\n        pos_inds = labels < self.num_classes\n        # 0 for pos, 1 for neg\n        obj_labels = (labels == self.num_classes).long()\n\n        # accumulate the samples for each category\n        unique_labels = labels.unique()\n        for u_l in unique_labels:\n            inds_ = labels == u_l.item()\n            self.cum_samples[u_l] += inds_.sum()\n\n        if label_weights is not None:\n            label_weights = label_weights.float()\n        else:\n            label_weights = labels.new_ones(labels.size(), dtype=torch.float)\n\n        cls_score_classes, cls_score_objectness = self._split_cls_score(\n            cls_score)\n        # calculate loss_cls_classes (only need pos samples)\n        if pos_inds.sum() > 0:\n            loss_cls_classes = self.loss_weight * self.cls_criterion(\n                cls_score_classes[pos_inds], labels[pos_inds],\n                label_weights[pos_inds], self.cum_samples[:self.num_classes],\n                self.num_classes, self.p, self.q, self.eps, reduction,\n                avg_factor)\n        else:\n            loss_cls_classes = cls_score_classes[pos_inds].sum()\n        # calculate loss_cls_objectness\n        loss_cls_objectness = self.loss_weight * cross_entropy(\n            cls_score_objectness, obj_labels, label_weights, reduction,\n            avg_factor)\n\n        if self.return_dict:\n            loss_cls = dict()\n            loss_cls['loss_cls_objectness'] = loss_cls_objectness\n            
loss_cls['loss_cls_classes'] = loss_cls_classes\n        else:\n            loss_cls = loss_cls_classes + loss_cls_objectness\n        return loss_cls\n"
  },
  {
    "path": "mmdet/models/losses/smooth_l1_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\nimport torch.nn as nn\n\nfrom ..builder import LOSSES\nfrom .utils import weighted_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef smooth_l1_loss(pred, target, beta=1.0):\n    \"\"\"Smooth L1 loss.\n\n    Args:\n        pred (torch.Tensor): The prediction.\n        target (torch.Tensor): The learning target of the prediction.\n        beta (float, optional): The threshold in the piecewise function.\n            Defaults to 1.0.\n\n    Returns:\n        torch.Tensor: Calculated loss\n    \"\"\"\n    assert beta > 0\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n    diff = torch.abs(pred - target)\n    loss = torch.where(diff < beta, 0.5 * diff * diff / beta,\n                       diff - 0.5 * beta)\n    return loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\n@weighted_loss\ndef l1_loss(pred, target):\n    \"\"\"L1 loss.\n\n    Args:\n        pred (torch.Tensor): The prediction.\n        target (torch.Tensor): The learning target of the prediction.\n\n    Returns:\n        torch.Tensor: Calculated loss\n    \"\"\"\n    if target.numel() == 0:\n        return pred.sum() * 0\n\n    assert pred.size() == target.size()\n    loss = torch.abs(pred - target)\n    return loss\n\n\n@LOSSES.register_module()\nclass SmoothL1Loss(nn.Module):\n    \"\"\"Smooth L1 loss.\n\n    Args:\n        beta (float, optional): The threshold in the piecewise function.\n            Defaults to 1.0.\n        reduction (str, optional): The method to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\". Defaults to \"mean\".\n        loss_weight (float, optional): The weight of loss.\n    \"\"\"\n\n    def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):\n        super(SmoothL1Loss, self).__init__()\n        self.beta = beta\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None,\n                **kwargs):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. 
Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * smooth_l1_loss(\n            pred,\n            target,\n            weight,\n            beta=self.beta,\n            reduction=reduction,\n            avg_factor=avg_factor,\n            **kwargs)\n        return loss_bbox\n\n\n@LOSSES.register_module()\nclass L1Loss(nn.Module):\n    \"\"\"L1 loss.\n\n    Args:\n        reduction (str, optional): The method to reduce the loss.\n            Options are \"none\", \"mean\" and \"sum\".\n        loss_weight (float, optional): The weight of loss.\n    \"\"\"\n\n    def __init__(self, reduction='mean', loss_weight=1.0):\n        super(L1Loss, self).__init__()\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Defaults to None.\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        loss_bbox = self.loss_weight * l1_loss(\n            pred, target, weight, reduction=reduction, avg_factor=avg_factor)\n        return loss_bbox\n"
  },
  {
    "path": "mmdet/models/losses/utils.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport functools\n\nimport mmcv\nimport torch\nimport torch.nn.functional as F\n\n\ndef reduce_loss(loss, reduction):\n    \"\"\"Reduce loss as specified.\n\n    Args:\n        loss (Tensor): Elementwise loss tensor.\n        reduction (str): Options are \"none\", \"mean\" and \"sum\".\n\n    Return:\n        Tensor: Reduced loss tensor.\n    \"\"\"\n    reduction_enum = F._Reduction.get_enum(reduction)\n    # none: 0, elementwise_mean:1, sum: 2\n    if reduction_enum == 0:\n        return loss\n    elif reduction_enum == 1:\n        return loss.mean()\n    elif reduction_enum == 2:\n        return loss.sum()\n\n\n@mmcv.jit(derivate=True, coderize=True)\ndef weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):\n    \"\"\"Apply element-wise weight and reduce loss.\n\n    Args:\n        loss (Tensor): Element-wise loss.\n        weight (Tensor): Element-wise weights.\n        reduction (str): Same as built-in losses of PyTorch.\n        avg_factor (float): Average factor when computing the mean of losses.\n\n    Returns:\n        Tensor: Processed loss values.\n    \"\"\"\n    # if weight is specified, apply element-wise weight\n    if weight is not None:\n        loss = loss * weight\n\n    # if avg_factor is not specified, just reduce the loss\n    if avg_factor is None:\n        loss = reduce_loss(loss, reduction)\n    else:\n        # if reduction is mean, then average the loss by avg_factor\n        if reduction == 'mean':\n            # Avoid causing ZeroDivisionError when avg_factor is 0.0,\n            # i.e., all labels of an image belong to ignore index.\n            eps = torch.finfo(torch.float32).eps\n            loss = loss.sum() / (avg_factor + eps)\n        # if reduction is 'none', then do nothing, otherwise raise an error\n        elif reduction != 'none':\n            raise ValueError('avg_factor can not be used with reduction=\"sum\"')\n    return loss\n\n\ndef weighted_loss(loss_func):\n    \"\"\"Create a weighted version of a given loss function.\n\n    To use this decorator, the loss function must have the signature like\n    `loss_func(pred, target, **kwargs)`. The function only needs to compute\n    element-wise loss without any reduction. This decorator will add weight\n    and reduction arguments to the function. The decorated function will have\n    the signature like `loss_func(pred, target, weight=None, reduction='mean',\n    avg_factor=None, **kwargs)`.\n\n    :Example:\n\n    >>> import torch\n    >>> @weighted_loss\n    >>> def l1_loss(pred, target):\n    >>>     return (pred - target).abs()\n\n    >>> pred = torch.Tensor([0, 2, 3])\n    >>> target = torch.Tensor([1, 1, 1])\n    >>> weight = torch.Tensor([1, 0, 1])\n\n    >>> l1_loss(pred, target)\n    tensor(1.3333)\n    >>> l1_loss(pred, target, weight)\n    tensor(1.)\n    >>> l1_loss(pred, target, reduction='none')\n    tensor([1., 1., 2.])\n    >>> l1_loss(pred, target, weight, avg_factor=2)\n    tensor(1.5000)\n    \"\"\"\n\n    @functools.wraps(loss_func)\n    def wrapper(pred,\n                target,\n                weight=None,\n                reduction='mean',\n                avg_factor=None,\n                **kwargs):\n        # get element-wise loss\n        loss = loss_func(pred, target, **kwargs)\n        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n        return loss\n\n    return wrapper\n"
  },
  {
    "path": "mmdet/models/losses/varifocal_loss.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom ..builder import LOSSES\nfrom .utils import weight_reduce_loss\n\n\n@mmcv.jit(derivate=True, coderize=True)\ndef varifocal_loss(pred,\n                   target,\n                   weight=None,\n                   alpha=0.75,\n                   gamma=2.0,\n                   iou_weighted=True,\n                   reduction='mean',\n                   avg_factor=None):\n    \"\"\"`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n    Args:\n        pred (torch.Tensor): The prediction with shape (N, C), C is the\n            number of classes\n        target (torch.Tensor): The learning target of the iou-aware\n            classification score with shape (N, C), C is the number of classes.\n        weight (torch.Tensor, optional): The weight of loss for each\n            prediction. Defaults to None.\n        alpha (float, optional): A balance factor for the negative part of\n            Varifocal Loss, which is different from the alpha of Focal Loss.\n            Defaults to 0.75.\n        gamma (float, optional): The gamma for calculating the modulating\n            factor. Defaults to 2.0.\n        iou_weighted (bool, optional): Whether to weight the loss of the\n            positive example with the iou target. Defaults to True.\n        reduction (str, optional): The method used to reduce the loss into\n            a scalar. Defaults to 'mean'. Options are \"none\", \"mean\" and\n            \"sum\".\n        avg_factor (int, optional): Average factor that is used to average\n            the loss. Defaults to None.\n    \"\"\"\n    # pred and target should be of the same size\n    assert pred.size() == target.size()\n    pred_sigmoid = pred.sigmoid()\n    target = target.type_as(pred)\n    if iou_weighted:\n        focal_weight = target * (target > 0.0).float() + \\\n            alpha * (pred_sigmoid - target).abs().pow(gamma) * \\\n            (target <= 0.0).float()\n    else:\n        focal_weight = (target > 0.0).float() + \\\n            alpha * (pred_sigmoid - target).abs().pow(gamma) * \\\n            (target <= 0.0).float()\n    loss = F.binary_cross_entropy_with_logits(\n        pred, target, reduction='none') * focal_weight\n    loss = weight_reduce_loss(loss, weight, reduction, avg_factor)\n    return loss\n\n\n@LOSSES.register_module()\nclass VarifocalLoss(nn.Module):\n\n    def __init__(self,\n                 use_sigmoid=True,\n                 alpha=0.75,\n                 gamma=2.0,\n                 iou_weighted=True,\n                 reduction='mean',\n                 loss_weight=1.0):\n        \"\"\"`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_\n\n        Args:\n            use_sigmoid (bool, optional): Whether the prediction is\n                used for sigmoid or softmax. Defaults to True.\n            alpha (float, optional): A balance factor for the negative part of\n                Varifocal Loss, which is different from the alpha of Focal\n                Loss. Defaults to 0.75.\n            gamma (float, optional): The gamma for calculating the modulating\n                factor. Defaults to 2.0.\n            iou_weighted (bool, optional): Whether to weight the loss of the\n                positive examples with the iou target. Defaults to True.\n            reduction (str, optional): The method used to reduce the loss into\n                a scalar. Defaults to 'mean'. 
Options are \"none\", \"mean\" and\n                \"sum\".\n            loss_weight (float, optional): Weight of loss. Defaults to 1.0.\n        \"\"\"\n        super(VarifocalLoss, self).__init__()\n        assert use_sigmoid is True, \\\n            'Only sigmoid varifocal loss supported now.'\n        assert alpha >= 0.0\n        self.use_sigmoid = use_sigmoid\n        self.alpha = alpha\n        self.gamma = gamma\n        self.iou_weighted = iou_weighted\n        self.reduction = reduction\n        self.loss_weight = loss_weight\n\n    def forward(self,\n                pred,\n                target,\n                weight=None,\n                avg_factor=None,\n                reduction_override=None):\n        \"\"\"Forward function.\n\n        Args:\n            pred (torch.Tensor): The prediction.\n            target (torch.Tensor): The learning target of the prediction.\n            weight (torch.Tensor, optional): The weight of loss for each\n                prediction. Defaults to None.\n            avg_factor (int, optional): Average factor that is used to average\n                the loss. Defaults to None.\n            reduction_override (str, optional): The reduction method used to\n                override the original reduction method of the loss.\n                Options are \"none\", \"mean\" and \"sum\".\n\n        Returns:\n            torch.Tensor: The calculated loss\n        \"\"\"\n        assert reduction_override in (None, 'none', 'mean', 'sum')\n        reduction = (\n            reduction_override if reduction_override else self.reduction)\n        if self.use_sigmoid:\n            loss_cls = self.loss_weight * varifocal_loss(\n                pred,\n                target,\n                weight,\n                alpha=self.alpha,\n                gamma=self.gamma,\n                iou_weighted=self.iou_weighted,\n                reduction=reduction,\n                avg_factor=avg_factor)\n        else:\n            raise NotImplementedError\n        return loss_cls\n"
  },
  {
    "path": "mmdet/models/necks/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bfp import BFP\nfrom .channel_mapper import ChannelMapper\nfrom .ct_resnet_neck import CTResNetNeck\nfrom .dilated_encoder import DilatedEncoder\nfrom .dyhead import DyHead\nfrom .fpg import FPG\nfrom .fpn import FPN\nfrom .fpn_carafe import FPN_CARAFE\nfrom .hrfpn import HRFPN\nfrom .nas_fpn import NASFPN\nfrom .nasfcos_fpn import NASFCOS_FPN\nfrom .pafpn import PAFPN\nfrom .rfp import RFP\nfrom .ssd_neck import SSDNeck\nfrom .yolo_neck import YOLOV3Neck\nfrom .yolox_pafpn import YOLOXPAFPN\n\n__all__ = [\n    'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',\n    'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',\n    'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead'\n]\n"
  },
  {
    "path": "mmdet/models/necks/bfp.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.cnn.bricks import NonLocal2d\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass BFP(BaseModule):\n    \"\"\"BFP (Balanced Feature Pyramids)\n\n    BFP takes multi-level features as inputs and gather them into a single one,\n    then refine the gathered feature and scatter the refined results to\n    multi-level features. This module is used in Libra R-CNN (CVPR 2019), see\n    the paper `Libra R-CNN: Towards Balanced Learning for Object Detection\n    <https://arxiv.org/abs/1904.02701>`_ for details.\n\n    Args:\n        in_channels (int): Number of input channels (feature maps of all levels\n            should have the same channels).\n        num_levels (int): Number of input feature levels.\n        conv_cfg (dict): The config dict for convolution layers.\n        norm_cfg (dict): The config dict for normalization layers.\n        refine_level (int): Index of integration and refine level of BSF in\n            multi-level features from bottom to top.\n        refine_type (str): Type of the refine op, currently support\n            [None, 'conv', 'non_local'].\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 num_levels,\n                 refine_level=2,\n                 refine_type=None,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super(BFP, self).__init__(init_cfg)\n        assert refine_type in [None, 'conv', 'non_local']\n\n        self.in_channels = in_channels\n        self.num_levels = num_levels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.refine_level = refine_level\n        self.refine_type = refine_type\n        assert 0 <= self.refine_level < self.num_levels\n\n        if self.refine_type == 'conv':\n            self.refine = ConvModule(\n                self.in_channels,\n                self.in_channels,\n                3,\n                padding=1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n        elif self.refine_type == 'non_local':\n            self.refine = NonLocal2d(\n                self.in_channels,\n                reduction=1,\n                use_scale=False,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == self.num_levels\n\n        # step 1: gather multi-level features by resize and average\n        feats = []\n        gather_size = inputs[self.refine_level].size()[2:]\n        for i in range(self.num_levels):\n            if i < self.refine_level:\n                gathered = F.adaptive_max_pool2d(\n                    inputs[i], output_size=gather_size)\n            else:\n                gathered = F.interpolate(\n                    inputs[i], size=gather_size, mode='nearest')\n            feats.append(gathered)\n\n        bsf = sum(feats) / len(feats)\n\n        # step 2: refine gathered features\n        if self.refine_type is not None:\n            bsf = self.refine(bsf)\n\n        # step 3: scatter refined features to multi-levels by a residual path\n        outs = []\n        for i in 
range(self.num_levels):\n            out_size = inputs[i].size()[2:]\n            if i < self.refine_level:\n                residual = F.interpolate(bsf, size=out_size, mode='nearest')\n            else:\n                residual = F.adaptive_max_pool2d(bsf, output_size=out_size)\n            outs.append(residual + inputs[i])\n\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/channel_mapper.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass ChannelMapper(BaseModule):\n    r\"\"\"Channel Mapper to reduce/increase channels of backbone features.\n\n    This is used to reduce/increase channels of backbone features.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale).\n        kernel_size (int, optional): kernel_size for reducing channels (used\n            at each scale). Default: 3.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: None.\n        act_cfg (dict, optional): Config dict for activation layer in\n            ConvModule. Default: dict(type='ReLU').\n        num_outs (int, optional): Number of output feature maps. There\n            would be extra_convs when num_outs larger than the length\n            of in_channels.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    Example:\n        >>> import torch\n        >>> in_channels = [2, 3, 5, 7]\n        >>> scales = [340, 170, 84, 43]\n        >>> inputs = [torch.rand(1, c, s, s)\n        ...           for c, s in zip(in_channels, scales)]\n        >>> self = ChannelMapper(in_channels, 11, 3).eval()\n        >>> outputs = self.forward(inputs)\n        >>> for i in range(len(outputs)):\n        ...     print(f'outputs[{i}].shape = {outputs[i].shape}')\n        outputs[0].shape = torch.Size([1, 11, 340, 340])\n        outputs[1].shape = torch.Size([1, 11, 170, 170])\n        outputs[2].shape = torch.Size([1, 11, 84, 84])\n        outputs[3].shape = torch.Size([1, 11, 43, 43])\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=3,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=dict(type='ReLU'),\n                 num_outs=None,\n                 init_cfg=dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super(ChannelMapper, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.extra_convs = None\n        if num_outs is None:\n            num_outs = len(in_channels)\n        self.convs = nn.ModuleList()\n        for in_channel in in_channels:\n            self.convs.append(\n                ConvModule(\n                    in_channel,\n                    out_channels,\n                    kernel_size,\n                    padding=(kernel_size - 1) // 2,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n        if num_outs > len(in_channels):\n            self.extra_convs = nn.ModuleList()\n            for i in range(len(in_channels), num_outs):\n                if i == len(in_channels):\n                    in_channel = in_channels[-1]\n                else:\n                    in_channel = out_channels\n                self.extra_convs.append(\n                    ConvModule(\n                        in_channel,\n                        out_channels,\n                        3,\n                        stride=2,\n                        padding=1,\n                        conv_cfg=conv_cfg,\n                     
   norm_cfg=norm_cfg,\n                        act_cfg=act_cfg))\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.convs)\n        outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]\n        if self.extra_convs:\n            for i in range(len(self.extra_convs)):\n                if i == 0:\n                    outs.append(self.extra_convs[0](inputs[-1]))\n                else:\n                    outs.append(self.extra_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/ct_resnet_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom mmdet.models.builder import NECKS\n\n\n@NECKS.register_module()\nclass CTResNetNeck(BaseModule):\n    \"\"\"The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for\n    object classification and box regression.\n\n    Args:\n         in_channel (int): Number of input channels.\n         num_deconv_filters (tuple[int]): Number of filters per stage.\n         num_deconv_kernels (tuple[int]): Number of kernels per stage.\n         use_dcn (bool): If True, use DCNv2. Default: True.\n         init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channel,\n                 num_deconv_filters,\n                 num_deconv_kernels,\n                 use_dcn=True,\n                 init_cfg=None):\n        super(CTResNetNeck, self).__init__(init_cfg)\n        assert len(num_deconv_filters) == len(num_deconv_kernels)\n        self.fp16_enabled = False\n        self.use_dcn = use_dcn\n        self.in_channel = in_channel\n        self.deconv_layers = self._make_deconv_layer(num_deconv_filters,\n                                                     num_deconv_kernels)\n\n    def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):\n        \"\"\"use deconv layers to upsample backbone's output.\"\"\"\n        layers = []\n        for i in range(len(num_deconv_filters)):\n            feat_channel = num_deconv_filters[i]\n            conv_module = ConvModule(\n                self.in_channel,\n                feat_channel,\n                3,\n                padding=1,\n                conv_cfg=dict(type='DCNv2') if self.use_dcn else None,\n                norm_cfg=dict(type='BN'))\n            layers.append(conv_module)\n            upsample_module = ConvModule(\n                feat_channel,\n                feat_channel,\n                num_deconv_kernels[i],\n                stride=2,\n                padding=1,\n                conv_cfg=dict(type='deconv'),\n                norm_cfg=dict(type='BN'))\n            layers.append(upsample_module)\n            self.in_channel = feat_channel\n\n        return nn.Sequential(*layers)\n\n    def init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.ConvTranspose2d):\n                # In order to be consistent with the source code,\n                # reset the ConvTranspose2d initialization parameters\n                m.reset_parameters()\n                # Simulated bilinear upsampling kernel\n                w = m.weight.data\n                f = math.ceil(w.size(2) / 2)\n                c = (2 * f - 1 - f % 2) / (2. 
* f)\n                for i in range(w.size(2)):\n                    for j in range(w.size(3)):\n                        w[0, 0, i, j] = \\\n                            (1 - math.fabs(i / f - c)) * (\n                                    1 - math.fabs(j / f - c))\n                for c in range(1, w.size(0)):\n                    w[c, 0, :, :] = w[0, 0, :, :]\n            elif isinstance(m, nn.BatchNorm2d):\n                nn.init.constant_(m.weight, 1)\n                nn.init.constant_(m.bias, 0)\n            # self.use_dcn is False\n            elif not self.use_dcn and isinstance(m, nn.Conv2d):\n                # In order to be consistent with the source code,\n                # reset the Conv2d initialization parameters\n                m.reset_parameters()\n\n    @auto_fp16()\n    def forward(self, inputs):\n        assert isinstance(inputs, (list, tuple))\n        outs = self.deconv_layers(inputs[-1])\n        return outs,\n"
  },
  {
    "path": "mmdet/models/necks/dilated_encoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,\n                      normal_init)\nfrom torch.nn import BatchNorm2d\n\nfrom ..builder import NECKS\n\n\nclass Bottleneck(nn.Module):\n    \"\"\"Bottleneck block for DilatedEncoder used in `YOLOF.\n\n    <https://arxiv.org/abs/2103.09460>`.\n\n    The Bottleneck contains three ConvLayers and one residual connection.\n\n    Args:\n        in_channels (int): The number of input channels.\n        mid_channels (int): The number of middle output channels.\n        dilation (int): Dilation rate.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 mid_channels,\n                 dilation,\n                 norm_cfg=dict(type='BN', requires_grad=True)):\n        super(Bottleneck, self).__init__()\n        self.conv1 = ConvModule(\n            in_channels, mid_channels, 1, norm_cfg=norm_cfg)\n        self.conv2 = ConvModule(\n            mid_channels,\n            mid_channels,\n            3,\n            padding=dilation,\n            dilation=dilation,\n            norm_cfg=norm_cfg)\n        self.conv3 = ConvModule(\n            mid_channels, in_channels, 1, norm_cfg=norm_cfg)\n\n    def forward(self, x):\n        identity = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n        out = self.conv3(out)\n        out = out + identity\n        return out\n\n\n@NECKS.register_module()\nclass DilatedEncoder(nn.Module):\n    \"\"\"Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.\n\n    This module contains two types of components:\n        - the original FPN lateral convolution layer and fpn convolution layer,\n              which are 1x1 conv + 3x3 conv\n        - the dilated residual block\n\n    Args:\n        in_channels (int): The number of input channels.\n        out_channels (int): The number of output channels.\n        block_mid_channels (int): The number of middle block output channels\n        num_residual_blocks (int): The number of residual blocks.\n        block_dilations (list): The list of residual blocks dilation.\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, block_mid_channels,\n                 num_residual_blocks, block_dilations):\n        super(DilatedEncoder, self).__init__()\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.block_mid_channels = block_mid_channels\n        self.num_residual_blocks = num_residual_blocks\n        self.block_dilations = block_dilations\n        self._init_layers()\n\n    def _init_layers(self):\n        self.lateral_conv = nn.Conv2d(\n            self.in_channels, self.out_channels, kernel_size=1)\n        self.lateral_norm = BatchNorm2d(self.out_channels)\n        self.fpn_conv = nn.Conv2d(\n            self.out_channels, self.out_channels, kernel_size=3, padding=1)\n        self.fpn_norm = BatchNorm2d(self.out_channels)\n        encoder_blocks = []\n        for i in range(self.num_residual_blocks):\n            dilation = self.block_dilations[i]\n            encoder_blocks.append(\n                Bottleneck(\n                    self.out_channels,\n                    self.block_mid_channels,\n                    dilation=dilation))\n        self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)\n\n    def init_weights(self):\n        caffe2_xavier_init(self.lateral_conv)\n   
     caffe2_xavier_init(self.fpn_conv)\n        for m in [self.lateral_norm, self.fpn_norm]:\n            constant_init(m, 1)\n        for m in self.dilated_encoder_blocks.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, mean=0, std=0.01)\n            if is_norm(m):\n                constant_init(m, 1)\n\n    def forward(self, feature):\n        out = self.lateral_norm(self.lateral_conv(feature[-1]))\n        out = self.fpn_norm(self.fpn_conv(out))\n        return self.dilated_encoder_blocks(out),\n"
  },
  {
    "path": "mmdet/models/necks/dyhead.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (build_activation_layer, build_norm_layer, constant_init,\n                      normal_init)\nfrom mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\nfrom ..utils import DyReLU\n\n# Reference:\n# https://github.com/microsoft/DynamicHead\n# https://github.com/jshilong/SEPC\n\n\nclass DyDCNv2(nn.Module):\n    \"\"\"ModulatedDeformConv2d with normalization layer used in DyHead.\n\n    This module cannot be configured with `conv_cfg=dict(type='DCNv2')`\n    because DyHead calculates offset and mask from middle-level feature.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        stride (int | tuple[int], optional): Stride of the convolution.\n            Default: 1.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: dict(type='GN', num_groups=16, requires_grad=True).\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 stride=1,\n                 norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)):\n        super().__init__()\n        self.with_norm = norm_cfg is not None\n        bias = not self.with_norm\n        self.conv = ModulatedDeformConv2d(\n            in_channels, out_channels, 3, stride=stride, padding=1, bias=bias)\n        if self.with_norm:\n            self.norm = build_norm_layer(norm_cfg, out_channels)[1]\n\n    def forward(self, x, offset, mask):\n        \"\"\"Forward function.\"\"\"\n        x = self.conv(x.contiguous(), offset.contiguous(), mask)\n        if self.with_norm:\n            x = self.norm(x)\n        return x\n\n\nclass DyHeadBlock(nn.Module):\n    \"\"\"DyHead Block with three types of attention.\n\n    HSigmoid arguments in default act_cfg follow official code, not paper.\n    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        zero_init_offset (bool, optional): Whether to use zero init for\n            `spatial_conv_offset`. Default: True.\n        act_cfg (dict, optional): Config dict for the last activation layer of\n            scale-aware attention. 
Default: dict(type='HSigmoid', bias=3.0,\n            divisor=6.0).\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 zero_init_offset=True,\n                 act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)):\n        super().__init__()\n        self.zero_init_offset = zero_init_offset\n        # (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x\n        self.offset_and_mask_dim = 3 * 3 * 3\n        self.offset_dim = 2 * 3 * 3\n\n        self.spatial_conv_high = DyDCNv2(in_channels, out_channels)\n        self.spatial_conv_mid = DyDCNv2(in_channels, out_channels)\n        self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2)\n        self.spatial_conv_offset = nn.Conv2d(\n            in_channels, self.offset_and_mask_dim, 3, padding=1)\n        self.scale_attn_module = nn.Sequential(\n            nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1),\n            nn.ReLU(inplace=True), build_activation_layer(act_cfg))\n        self.task_attn_module = DyReLU(out_channels)\n        self._init_weights()\n\n    def _init_weights(self):\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                normal_init(m, 0, 0.01)\n        if self.zero_init_offset:\n            constant_init(self.spatial_conv_offset, 0)\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        outs = []\n        for level in range(len(x)):\n            # calculate offset and mask of DCNv2 from middle-level feature\n            offset_and_mask = self.spatial_conv_offset(x[level])\n            offset = offset_and_mask[:, :self.offset_dim, :, :]\n            mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid()\n\n            mid_feat = self.spatial_conv_mid(x[level], offset, mask)\n            sum_feat = mid_feat * self.scale_attn_module(mid_feat)\n            summed_levels = 1\n            if level > 0:\n                low_feat = self.spatial_conv_low(x[level - 1], offset, mask)\n                sum_feat = sum_feat + \\\n                    low_feat * self.scale_attn_module(low_feat)\n                summed_levels += 1\n            if level < len(x) - 1:\n                # this upsample order is weird, but faster than natural order\n                # https://github.com/microsoft/DynamicHead/issues/25\n                high_feat = F.interpolate(\n                    self.spatial_conv_high(x[level + 1], offset, mask),\n                    size=x[level].shape[-2:],\n                    mode='bilinear',\n                    align_corners=True)\n                sum_feat = sum_feat + high_feat * \\\n                    self.scale_attn_module(high_feat)\n                summed_levels += 1\n            outs.append(self.task_attn_module(sum_feat / summed_levels))\n\n        return outs\n\n\n@NECKS.register_module()\nclass DyHead(BaseModule):\n    \"\"\"DyHead neck consisting of multiple DyHead Blocks.\n\n    See `Dynamic Head: Unifying Object Detection Heads with Attentions\n    <https://arxiv.org/abs/2106.08322>`_ for details.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        num_blocks (int, optional): Number of DyHead Blocks. Default: 6.\n        zero_init_offset (bool, optional): Whether to use zero init for\n            `spatial_conv_offset`. 
Default: True.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_blocks=6,\n                 zero_init_offset=True,\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_blocks = num_blocks\n        self.zero_init_offset = zero_init_offset\n\n        dyhead_blocks = []\n        for i in range(num_blocks):\n            in_channels = self.in_channels if i == 0 else self.out_channels\n            dyhead_blocks.append(\n                DyHeadBlock(\n                    in_channels,\n                    self.out_channels,\n                    zero_init_offset=zero_init_offset))\n        self.dyhead_blocks = nn.Sequential(*dyhead_blocks)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert isinstance(inputs, (tuple, list))\n        outs = self.dyhead_blocks(inputs)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/fpg.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\nclass Transition(BaseModule):\n    \"\"\"Base class for transition.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n    \"\"\"\n\n    def __init__(self, in_channels, out_channels, init_cfg=None):\n        super().__init__(init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n    def forward(x):\n        pass\n\n\nclass UpInterpolationConv(Transition):\n    \"\"\"A transition used for up-sampling.\n\n    Up-sample the input by interpolation then refines the feature by\n    a convolution layer.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        scale_factor (int): Up-sampling factor. Default: 2.\n        mode (int): Interpolation mode. Default: nearest.\n        align_corners (bool): Whether align corners when interpolation.\n            Default: None.\n        kernel_size (int): Kernel size for the conv. Default: 3.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 scale_factor=2,\n                 mode='nearest',\n                 align_corners=None,\n                 kernel_size=3,\n                 init_cfg=None,\n                 **kwargs):\n        super().__init__(in_channels, out_channels, init_cfg)\n        self.mode = mode\n        self.scale_factor = scale_factor\n        self.align_corners = align_corners\n        self.conv = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size,\n            padding=(kernel_size - 1) // 2,\n            **kwargs)\n\n    def forward(self, x):\n        x = F.interpolate(\n            x,\n            scale_factor=self.scale_factor,\n            mode=self.mode,\n            align_corners=self.align_corners)\n        x = self.conv(x)\n        return x\n\n\nclass LastConv(Transition):\n    \"\"\"A transition used for refining the output of the last stage.\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of output channels.\n        num_inputs (int): Number of inputs of the FPN features.\n        kernel_size (int): Kernel size for the conv. 
Default: 3.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_inputs,\n                 kernel_size=3,\n                 init_cfg=None,\n                 **kwargs):\n        super().__init__(in_channels, out_channels, init_cfg)\n        self.num_inputs = num_inputs\n        self.conv_out = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size,\n            padding=(kernel_size - 1) // 2,\n            **kwargs)\n\n    def forward(self, inputs):\n        assert len(inputs) == self.num_inputs\n        return self.conv_out(inputs[-1])\n\n\n@NECKS.register_module()\nclass FPG(BaseModule):\n    \"\"\"FPG.\n\n    Implementation of `Feature Pyramid Grids (FPG)\n    <https://arxiv.org/abs/2004.03580>`_.\n    This implementation only gives the basic structure stated in the paper.\n    But users can implement different type of transitions to fully explore the\n    the potential power of the structure of FPG.\n\n    Args:\n        in_channels (int): Number of input channels (feature maps of all levels\n            should have the same channels).\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        stack_times (int): The number of times the pyramid architecture will\n            be stacked.\n        paths (list[str]): Specify the path order of each stack level.\n            Each element in the list should be either 'bu' (bottom-up) or\n            'td' (top-down).\n        inter_channels (int): Number of inter channels.\n        same_up_trans (dict): Transition that goes down at the same stage.\n        same_down_trans (dict): Transition that goes up at the same stage.\n        across_lateral_trans (dict): Across-pathway same-stage\n        across_down_trans (dict): Across-pathway bottom-up connection.\n        across_up_trans (dict): Across-pathway top-down connection.\n        across_skip_trans (dict): Across-pathway skip connection.\n        output_trans (dict): Transition that trans the output of the\n            last stage.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool): It decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    transition_types = {\n        'conv': ConvModule,\n        'interpolation_conv': UpInterpolationConv,\n        'last_conv': LastConv,\n    }\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 stack_times,\n                 paths,\n                 inter_channels=None,\n                 same_down_trans=None,\n                 same_up_trans=dict(\n                     type='conv', kernel_size=3, stride=2, padding=1),\n                 across_lateral_trans=dict(type='conv', kernel_size=1),\n                 across_down_trans=dict(type='conv', kernel_size=3),\n                 across_up_trans=None,\n                 across_skip_trans=dict(type='identity'),\n                 output_trans=dict(type='last_conv', kernel_size=3),\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 norm_cfg=None,\n                 skip_inds=None,\n                 init_cfg=[\n                     dict(type='Caffe2Xavier', layer='Conv2d'),\n                     dict(\n                         type='Constant',\n                         layer=[\n                             '_BatchNorm', '_InstanceNorm', 'GroupNorm',\n                             'LayerNorm'\n                         ],\n                         val=1.0)\n                 ]):\n        super(FPG, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        if inter_channels is None:\n            self.inter_channels = [out_channels for _ in range(num_outs)]\n        elif isinstance(inter_channels, int):\n            self.inter_channels = [inter_channels for _ in range(num_outs)]\n        else:\n            assert isinstance(inter_channels, list)\n            assert len(inter_channels) == num_outs\n            self.inter_channels = inter_channels\n        self.stack_times = stack_times\n        self.paths = paths\n        assert isinstance(paths, list) and len(paths) == stack_times\n        for d in paths:\n            assert d in ('bu', 'td')\n\n        self.same_down_trans = same_down_trans\n        self.same_up_trans = same_up_trans\n        self.across_lateral_trans = across_lateral_trans\n        self.across_down_trans = across_down_trans\n        self.across_up_trans = across_up_trans\n        self.output_trans = output_trans\n        self.across_skip_trans = across_skip_trans\n\n        self.with_bias = norm_cfg is None\n        # skip_inds must be specified if across_skip_trans is not None\n        if self.across_skip_trans is not None:\n            assert skip_inds is not None\n        self.skip_inds = skip_inds\n        assert len(self.skip_inds[0]) <= self.stack_times\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n\n       
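 # An illustrative instantiation (the values below are only an example, loosely\n        # mirroring common FPG configs, and are not normative):\n        #   FPG(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5,\n        #       stack_times=9, paths=['bu'] * 9,\n        #       skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])\n        # Here `skip_inds` has one entry per output level, listing the stack steps to skip.\n       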
 # build lateral 1x1 convs to reduce channels\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = nn.Conv2d(self.in_channels[i],\n                               self.inter_channels[i - self.start_level], 1)\n            self.lateral_convs.append(l_conv)\n\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            if self.add_extra_convs:\n                fpn_idx = self.backbone_end_level - self.start_level + i\n                extra_conv = nn.Conv2d(\n                    self.inter_channels[fpn_idx - 1],\n                    self.inter_channels[fpn_idx],\n                    3,\n                    stride=2,\n                    padding=1)\n                self.extra_downsamples.append(extra_conv)\n            else:\n                self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))\n\n        self.fpn_transitions = nn.ModuleList()  # stack times\n        for s in range(self.stack_times):\n            stage_trans = nn.ModuleList()  # num of feature levels\n            for i in range(self.num_outs):\n                # same, across_lateral, across_down, across_up\n                trans = nn.ModuleDict()\n                if s in self.skip_inds[i]:\n                    stage_trans.append(trans)\n                    continue\n                # build same-stage down trans (used in bottom-up paths)\n                if i == 0 or self.same_up_trans is None:\n                    same_up_trans = None\n                else:\n                    same_up_trans = self.build_trans(\n                        self.same_up_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['same_up'] = same_up_trans\n                # build same-stage up trans (used in top-down paths)\n                if i == self.num_outs - 1 or self.same_down_trans is None:\n                    same_down_trans = None\n                else:\n                    same_down_trans = self.build_trans(\n                        self.same_down_trans, self.inter_channels[i + 1],\n                        self.inter_channels[i])\n                trans['same_down'] = same_down_trans\n                # build across lateral trans\n                across_lateral_trans = self.build_trans(\n                    self.across_lateral_trans, self.inter_channels[i],\n                    self.inter_channels[i])\n                trans['across_lateral'] = across_lateral_trans\n                # build across down trans\n                if i == self.num_outs - 1 or self.across_down_trans is None:\n                    across_down_trans = None\n                else:\n                    across_down_trans = self.build_trans(\n                        self.across_down_trans, self.inter_channels[i + 1],\n                        self.inter_channels[i])\n                trans['across_down'] = across_down_trans\n                # build across up trans\n                if i == 0 or self.across_up_trans is None:\n                    across_up_trans = None\n                else:\n                    across_up_trans = self.build_trans(\n                        self.across_up_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['across_up'] = across_up_trans\n                if self.across_skip_trans is None:\n                    across_skip_trans = None\n             
   else:\n                    across_skip_trans = self.build_trans(\n                        self.across_skip_trans, self.inter_channels[i - 1],\n                        self.inter_channels[i])\n                trans['across_skip'] = across_skip_trans\n                # build across_skip trans\n                stage_trans.append(trans)\n            self.fpn_transitions.append(stage_trans)\n\n        self.output_transition = nn.ModuleList()  # output levels\n        for i in range(self.num_outs):\n            trans = self.build_trans(\n                self.output_trans,\n                self.inter_channels[i],\n                self.out_channels,\n                num_inputs=self.stack_times + 1)\n            self.output_transition.append(trans)\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def build_trans(self, cfg, in_channels, out_channels, **extra_args):\n        cfg_ = cfg.copy()\n        trans_type = cfg_.pop('type')\n        trans_cls = self.transition_types[trans_type]\n        return trans_cls(in_channels, out_channels, **cfg_, **extra_args)\n\n    def fuse(self, fuse_dict):\n        out = None\n        for item in fuse_dict.values():\n            if item is not None:\n                if out is None:\n                    out = item\n                else:\n                    out = out + item\n        return out\n\n    def forward(self, inputs):\n        assert len(inputs) == len(self.in_channels)\n\n        # build all levels from original feature maps\n        feats = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n        for downsample in self.extra_downsamples:\n            feats.append(downsample(feats[-1]))\n\n        outs = [feats]\n\n        for i in range(self.stack_times):\n            current_outs = outs[-1]\n            next_outs = []\n            direction = self.paths[i]\n            for j in range(self.num_outs):\n                if i in self.skip_inds[j]:\n                    next_outs.append(outs[-1][j])\n                    continue\n                # feature level\n                if direction == 'td':\n                    lvl = self.num_outs - j - 1\n                else:\n                    lvl = j\n                # get transitions\n                if direction == 'td':\n                    same_trans = self.fpn_transitions[i][lvl]['same_down']\n                else:\n                    same_trans = self.fpn_transitions[i][lvl]['same_up']\n                across_lateral_trans = self.fpn_transitions[i][lvl][\n                    'across_lateral']\n                across_down_trans = self.fpn_transitions[i][lvl]['across_down']\n                across_up_trans = self.fpn_transitions[i][lvl]['across_up']\n                across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']\n                # init output\n                to_fuse = dict(\n                    same=None, lateral=None, across_up=None, across_down=None)\n                # same downsample/upsample\n                if same_trans is not None:\n                    to_fuse['same'] = same_trans(next_outs[-1])\n                # across lateral\n                if across_lateral_trans is not None:\n                    to_fuse['lateral'] = across_lateral_trans(\n                        current_outs[lvl])\n                # across downsample\n                if lvl > 0 and across_up_trans is not None:\n                    to_fuse['across_up'] = across_up_trans(current_outs[lvl -\n                               
                                         1])\n                # across upsample\n                if (lvl < self.num_outs - 1 and across_down_trans is not None):\n                    to_fuse['across_down'] = across_down_trans(\n                        current_outs[lvl + 1])\n                if across_skip_trans is not None:\n                    to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])\n                x = self.fuse(to_fuse)\n                next_outs.append(x)\n\n            if direction == 'td':\n                outs.append(next_outs[::-1])\n            else:\n                outs.append(next_outs)\n\n        # output trans\n        final_outs = []\n        for i in range(self.num_outs):\n            lvl_out_list = []\n            for s in range(len(outs)):\n                lvl_out_list.append(outs[s][i])\n            lvl_out = self.output_transition[i](lvl_out_list)\n            final_outs.append(lvl_out)\n\n        return final_outs\n"
  },
  {
    "path": "mmdet/models/necks/fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass FPN(BaseModule):\n    r\"\"\"Feature Pyramid Network.\n\n    This is an implementation of paper `Feature Pyramid Networks for Object\n    Detection <https://arxiv.org/abs/1612.03144>`_.\n\n    Args:\n        in_channels (list[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale).\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool | str): If bool, it decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, it is equivalent to `add_extra_convs='on_input'`.\n            If str, it specifies the source feature map of the extra convs.\n            Only the following options are allowed\n\n            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n            - 'on_lateral': Last feature map after lateral convs.\n            - 'on_output': The last output feature map after fpn convs.\n        relu_before_extra_convs (bool): Whether to apply relu before the extra\n            conv. Default: False.\n        no_norm_on_lateral (bool): Whether to apply norm on lateral.\n            Default: False.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        act_cfg (dict): Config dict for activation layer in ConvModule.\n            Default: None.\n        upsample_cfg (dict): Config dict for interpolate layer.\n            Default: dict(mode='nearest').\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n\n    Example:\n        >>> import torch\n        >>> in_channels = [2, 3, 5, 7]\n        >>> scales = [340, 170, 84, 43]\n        >>> inputs = [torch.rand(1, c, s, s)\n        ...           for c, s in zip(in_channels, scales)]\n        >>> self = FPN(in_channels, 11, len(in_channels)).eval()\n        >>> outputs = self.forward(inputs)\n        >>> for i in range(len(outputs)):\n        ...     
print(f'outputs[{i}].shape = {outputs[i].shape}')\n        outputs[0].shape = torch.Size([1, 11, 340, 340])\n        outputs[1].shape = torch.Size([1, 11, 170, 170])\n        outputs[2].shape = torch.Size([1, 11, 84, 84])\n        outputs[3].shape = torch.Size([1, 11, 43, 43])\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 relu_before_extra_convs=False,\n                 no_norm_on_lateral=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 upsample_cfg=dict(mode='nearest'),\n                 init_cfg=dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super(FPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.relu_before_extra_convs = relu_before_extra_convs\n        self.no_norm_on_lateral = no_norm_on_lateral\n        self.fp16_enabled = False\n        self.upsample_cfg = upsample_cfg.copy()\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n        assert isinstance(add_extra_convs, (str, bool))\n        if isinstance(add_extra_convs, str):\n            # Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'\n            assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')\n        elif add_extra_convs:  # True\n            self.add_extra_convs = 'on_input'\n\n        self.lateral_convs = nn.ModuleList()\n        self.fpn_convs = nn.ModuleList()\n\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,\n                act_cfg=act_cfg,\n                inplace=False)\n            fpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n\n            self.lateral_convs.append(l_conv)\n            self.fpn_convs.append(fpn_conv)\n\n        # add extra conv layers (e.g., RetinaNet)\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        if self.add_extra_convs and extra_levels >= 1:\n            for i in range(extra_levels):\n                if i == 0 and self.add_extra_convs == 'on_input':\n                    in_channels = self.in_channels[self.backbone_end_level - 1]\n                else:\n                    in_channels = out_channels\n                extra_fpn_conv = ConvModule(\n       
             in_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg,\n                    inplace=False)\n                self.fpn_convs.append(extra_fpn_conv)\n\n    @auto_fp16()\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n\n        # build top-down path\n        used_backbone_levels = len(laterals)\n        for i in range(used_backbone_levels - 1, 0, -1):\n            # In some cases, fixing `scale factor` (e.g. 2) is preferred, but\n            #  it cannot co-exist with `size` in `F.interpolate`.\n            if 'scale_factor' in self.upsample_cfg:\n                # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n                laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                    laterals[i], **self.upsample_cfg)\n            else:\n                prev_shape = laterals[i - 1].shape[2:]\n                laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                    laterals[i], size=prev_shape, **self.upsample_cfg)\n\n        # build outputs\n        # part 1: from original levels\n        outs = [\n            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n        ]\n        # part 2: add extra levels\n        if self.num_outs > len(outs):\n            # use max pool to get more levels on top of outputs\n            # (e.g., Faster R-CNN, Mask R-CNN)\n            if not self.add_extra_convs:\n                for i in range(self.num_outs - used_backbone_levels):\n                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n            # add conv layers on top of original feature maps (RetinaNet)\n            else:\n                if self.add_extra_convs == 'on_input':\n                    extra_source = inputs[self.backbone_end_level - 1]\n                elif self.add_extra_convs == 'on_lateral':\n                    extra_source = laterals[-1]\n                elif self.add_extra_convs == 'on_output':\n                    extra_source = outs[-1]\n                else:\n                    raise NotImplementedError\n                outs.append(self.fpn_convs[used_backbone_levels](extra_source))\n                for i in range(used_backbone_levels + 1, self.num_outs):\n                    if self.relu_before_extra_convs:\n                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n                    else:\n                        outs.append(self.fpn_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/fpn_carafe.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, build_upsample_layer, xavier_init\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass FPN_CARAFE(BaseModule):\n    \"\"\"FPN_CARAFE is a more flexible implementation of FPN. It allows more\n    choice for upsample methods during the top-down pathway.\n\n    It can reproduce the performance of ICCV 2019 paper\n    CARAFE: Content-Aware ReAssembly of FEatures\n    Please refer to https://arxiv.org/abs/1905.02188 for more details.\n\n    Args:\n        in_channels (list[int]): Number of channels for each input feature map.\n        out_channels (int): Output channels of feature pyramids.\n        num_outs (int): Number of output stages.\n        start_level (int): Start level of feature pyramids.\n            (Default: 0)\n        end_level (int): End level of feature pyramids.\n            (Default: -1 indicates the last level).\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n        activate (str): Type of activation function in ConvModule\n            (Default: None indicates w/o activation).\n        order (dict): Order of components in ConvModule.\n        upsample (str): Type of upsample layer.\n        upsample_cfg (dict): Dictionary to construct and config upsample layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=0,\n                 end_level=-1,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 order=('conv', 'norm', 'act'),\n                 upsample_cfg=dict(\n                     type='carafe',\n                     up_kernel=5,\n                     up_group=1,\n                     encoder_kernel=3,\n                     encoder_dilation=1),\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(FPN_CARAFE, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.norm_cfg = norm_cfg\n        self.act_cfg = act_cfg\n        self.with_bias = norm_cfg is None\n        self.upsample_cfg = upsample_cfg.copy()\n        self.upsample = self.upsample_cfg.get('type')\n        self.relu = nn.ReLU(inplace=False)\n\n        self.order = order\n        assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]\n\n        assert self.upsample in [\n            'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None\n        ]\n        if self.upsample in ['deconv', 'pixel_shuffle']:\n            assert hasattr(\n                self.upsample_cfg,\n                'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0\n            self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level 
is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n\n        self.lateral_convs = ModuleList()\n        self.fpn_convs = ModuleList()\n        self.upsample_modules = ModuleList()\n\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                norm_cfg=norm_cfg,\n                bias=self.with_bias,\n                act_cfg=act_cfg,\n                inplace=False,\n                order=self.order)\n            fpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                norm_cfg=self.norm_cfg,\n                bias=self.with_bias,\n                act_cfg=act_cfg,\n                inplace=False,\n                order=self.order)\n            if i != self.backbone_end_level - 1:\n                upsample_cfg_ = self.upsample_cfg.copy()\n                if self.upsample == 'deconv':\n                    upsample_cfg_.update(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        kernel_size=self.upsample_kernel,\n                        stride=2,\n                        padding=(self.upsample_kernel - 1) // 2,\n                        output_padding=(self.upsample_kernel - 1) // 2)\n                elif self.upsample == 'pixel_shuffle':\n                    upsample_cfg_.update(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        scale_factor=2,\n                        upsample_kernel=self.upsample_kernel)\n                elif self.upsample == 'carafe':\n                    upsample_cfg_.update(channels=out_channels, scale_factor=2)\n                else:\n                    # suppress warnings\n                    align_corners = (None\n                                     if self.upsample == 'nearest' else False)\n                    upsample_cfg_.update(\n                        scale_factor=2,\n                        mode=self.upsample,\n                        align_corners=align_corners)\n                upsample_module = build_upsample_layer(upsample_cfg_)\n                self.upsample_modules.append(upsample_module)\n            self.lateral_convs.append(l_conv)\n            self.fpn_convs.append(fpn_conv)\n\n        # add extra conv layers (e.g., RetinaNet)\n        extra_out_levels = (\n            num_outs - self.backbone_end_level + self.start_level)\n        if extra_out_levels >= 1:\n            for i in range(extra_out_levels):\n                in_channels = (\n                    self.in_channels[self.backbone_end_level -\n                                     1] if i == 0 else out_channels)\n                extra_l_conv = ConvModule(\n                    in_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    norm_cfg=norm_cfg,\n                    bias=self.with_bias,\n                    act_cfg=act_cfg,\n                    inplace=False,\n                    order=self.order)\n                if self.upsample == 'deconv':\n                    upsampler_cfg_ = dict(\n                        
in_channels=out_channels,\n                        out_channels=out_channels,\n                        kernel_size=self.upsample_kernel,\n                        stride=2,\n                        padding=(self.upsample_kernel - 1) // 2,\n                        output_padding=(self.upsample_kernel - 1) // 2)\n                elif self.upsample == 'pixel_shuffle':\n                    upsampler_cfg_ = dict(\n                        in_channels=out_channels,\n                        out_channels=out_channels,\n                        scale_factor=2,\n                        upsample_kernel=self.upsample_kernel)\n                elif self.upsample == 'carafe':\n                    upsampler_cfg_ = dict(\n                        channels=out_channels,\n                        scale_factor=2,\n                        **self.upsample_cfg)\n                else:\n                    # suppress warnings\n                    align_corners = (None\n                                     if self.upsample == 'nearest' else False)\n                    upsampler_cfg_ = dict(\n                        scale_factor=2,\n                        mode=self.upsample,\n                        align_corners=align_corners)\n                upsampler_cfg_['type'] = self.upsample\n                upsample_module = build_upsample_layer(upsampler_cfg_)\n                extra_fpn_conv = ConvModule(\n                    out_channels,\n                    out_channels,\n                    3,\n                    padding=1,\n                    norm_cfg=self.norm_cfg,\n                    bias=self.with_bias,\n                    act_cfg=act_cfg,\n                    inplace=False,\n                    order=self.order)\n                self.upsample_modules.append(upsample_module)\n                self.fpn_convs.append(extra_fpn_conv)\n                self.lateral_convs.append(extra_l_conv)\n\n    # default init_weights for conv(msra) and norm in ConvModule\n    def init_weights(self):\n        \"\"\"Initialize the weights of module.\"\"\"\n        super(FPN_CARAFE, self).init_weights()\n        for m in self.modules():\n            if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):\n                xavier_init(m, distribution='uniform')\n        for m in self.modules():\n            if isinstance(m, CARAFEPack):\n                m.init_weights()\n\n    def slice_as(self, src, dst):\n        \"\"\"Slice ``src`` as ``dst``\n\n        Note:\n            ``src`` should have the same or larger size than ``dst``.\n\n        Args:\n            src (torch.Tensor): Tensors to be sliced.\n            dst (torch.Tensor): ``src`` will be sliced to have the same\n                size as ``dst``.\n\n        Returns:\n            torch.Tensor: Sliced tensor.\n        \"\"\"\n        assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))\n        if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):\n            return src\n        else:\n            return src[:, :, :dst.size(2), :dst.size(3)]\n\n    def tensor_add(self, a, b):\n        \"\"\"Add tensors ``a`` and ``b`` that might have different sizes.\"\"\"\n        if a.size() == b.size():\n            c = a + b\n        else:\n            c = a + self.slice_as(b, a)\n        return c\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = []\n        for i, lateral_conv in enumerate(self.lateral_convs):\n            if i <= 
self.backbone_end_level - self.start_level:\n                input = inputs[min(i + self.start_level, len(inputs) - 1)]\n            else:\n                input = laterals[-1]\n            lateral = lateral_conv(input)\n            laterals.append(lateral)\n\n        # build top-down path\n        for i in range(len(laterals) - 1, 0, -1):\n            if self.upsample is not None:\n                upsample_feat = self.upsample_modules[i - 1](laterals[i])\n            else:\n                upsample_feat = laterals[i]\n            laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)\n\n        # build outputs\n        num_conv_outs = len(self.fpn_convs)\n        outs = []\n        for i in range(num_conv_outs):\n            out = self.fpn_convs[i](laterals[i])\n            outs.append(out)\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/hrfpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\nfrom torch.utils.checkpoint import checkpoint\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass HRFPN(BaseModule):\n    \"\"\"HRFPN (High Resolution Feature Pyramids)\n\n    paper: `High-Resolution Representations for Labeling Pixels and Regions\n    <https://arxiv.org/abs/1904.04514>`_.\n\n    Args:\n        in_channels (list): number of channels for each branch.\n        out_channels (int): output channels of feature pyramids.\n        num_outs (int): number of output stages.\n        pooling_type (str): pooling for generating feature pyramids\n            from {MAX, AVG}.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        with_cp  (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed.\n        stride (int): stride of 3x3 convolutional layers\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs=5,\n                 pooling_type='AVG',\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 with_cp=False,\n                 stride=1,\n                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):\n        super(HRFPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.with_cp = with_cp\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        self.reduction_conv = ConvModule(\n            sum(in_channels),\n            out_channels,\n            kernel_size=1,\n            conv_cfg=self.conv_cfg,\n            act_cfg=None)\n\n        self.fpn_convs = nn.ModuleList()\n        for i in range(self.num_outs):\n            self.fpn_convs.append(\n                ConvModule(\n                    out_channels,\n                    out_channels,\n                    kernel_size=3,\n                    padding=1,\n                    stride=stride,\n                    conv_cfg=self.conv_cfg,\n                    act_cfg=None))\n\n        if pooling_type == 'MAX':\n            self.pooling = F.max_pool2d\n        else:\n            self.pooling = F.avg_pool2d\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == self.num_ins\n        outs = [inputs[0]]\n        for i in range(1, self.num_ins):\n            outs.append(\n                F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))\n        out = torch.cat(outs, dim=1)\n        if out.requires_grad and self.with_cp:\n            out = checkpoint(self.reduction_conv, out)\n        else:\n            out = self.reduction_conv(out)\n        outs = [out]\n        for i in range(1, self.num_outs):\n            outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))\n        outputs = []\n\n        for i in range(self.num_outs):\n            if outs[i].requires_grad and self.with_cp:\n                tmp_out = checkpoint(self.fpn_convs[i], outs[i])\n            else:\n                tmp_out = 
self.fpn_convs[i](outs[i])\n            outputs.append(tmp_out)\n        return tuple(outputs)\n"
  },
  {
    "path": "mmdet/models/necks/nas_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops.merge_cells import GlobalPoolingCell, SumCell\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass NASFPN(BaseModule):\n    \"\"\"NAS-FPN.\n\n    Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture\n    for Object Detection <https://arxiv.org/abs/1904.07392>`_\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        stack_times (int): The number of times the pyramid architecture will\n            be stacked.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool): It decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 stack_times,\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 norm_cfg=None,\n                 init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):\n        super(NASFPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)  # num of input feature levels\n        self.num_outs = num_outs  # num of output feature levels\n        self.stack_times = stack_times\n        self.norm_cfg = norm_cfg\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n\n        # add lateral connections\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            l_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            self.lateral_convs.append(l_conv)\n\n        # add extra downsample layers (stride-2 pooling or conv)\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            extra_conv = ConvModule(\n                out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)\n            self.extra_downsamples.append(\n                nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))\n\n        # add NAS FPN 
connections\n        self.fpn_stages = ModuleList()\n        for _ in range(self.stack_times):\n            stage = nn.ModuleDict()\n            # gp(p6, p4) -> p4_1\n            stage['gp_64_4'] = GlobalPoolingCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p4_1, p4) -> p4_2\n            stage['sum_44_4'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p4_2, p3) -> p3_out\n            stage['sum_43_3'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p3_out, p4_2) -> p4_out\n            stage['sum_34_4'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p5, gp(p4_out, p3_out)) -> p5_out\n            stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)\n            stage['sum_55_5'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # sum(p7, gp(p5_out, p4_2)) -> p7_out\n            stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)\n            stage['sum_77_7'] = SumCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            # gp(p7_out, p5_out) -> p6_out\n            stage['gp_75_6'] = GlobalPoolingCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                out_norm_cfg=norm_cfg)\n            self.fpn_stages.append(stage)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        # build P3-P5\n        feats = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n        # build P6-P7 on top of P5\n        for downsample in self.extra_downsamples:\n            feats.append(downsample(feats[-1]))\n\n        p3, p4, p5, p6, p7 = feats\n\n        for stage in self.fpn_stages:\n            # gp(p6, p4) -> p4_1\n            p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])\n            # sum(p4_1, p4) -> p4_2\n            p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])\n            # sum(p4_2, p3) -> p3_out\n            p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])\n            # sum(p3_out, p4_2) -> p4_out\n            p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])\n            # sum(p5, gp(p4_out, p3_out)) -> p5_out\n            p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])\n            p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])\n            # sum(p7, gp(p5_out, p4_2)) -> p7_out\n            p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])\n            p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])\n            # gp(p7_out, p5_out) -> p6_out\n            p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])\n\n        return p3, p4, p5, p6, p7\n"
  },
  {
    "path": "mmdet/models/necks/nasfcos_fpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, caffe2_xavier_init\nfrom mmcv.ops.merge_cells import ConcatCell\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass NASFCOS_FPN(BaseModule):\n    \"\"\"FPN structure in NASFPN.\n\n    Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for\n    Object Detection <https://arxiv.org/abs/1906.04423>`_\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool): It decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, its actual mode is specified by `extra_convs_on_inputs`.\n        conv_cfg (dict): dictionary to construct and config conv layer.\n        norm_cfg (dict): dictionary to construct and config norm layer.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=1,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(NASFCOS_FPN, self).__init__(init_cfg)\n        assert isinstance(in_channels, list)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        self.num_ins = len(in_channels)\n        self.num_outs = num_outs\n        self.norm_cfg = norm_cfg\n        self.conv_cfg = conv_cfg\n\n        if end_level == -1 or end_level == self.num_ins - 1:\n            self.backbone_end_level = self.num_ins\n            assert num_outs >= self.num_ins - start_level\n        else:\n            # if end_level is not the last level, no extra level is allowed\n            self.backbone_end_level = end_level + 1\n            assert end_level < self.num_ins\n            assert num_outs == end_level - start_level + 1\n        self.start_level = start_level\n        self.end_level = end_level\n        self.add_extra_convs = add_extra_convs\n\n        self.adapt_convs = nn.ModuleList()\n        for i in range(self.start_level, self.backbone_end_level):\n            adapt_conv = ConvModule(\n                in_channels[i],\n                out_channels,\n                1,\n                stride=1,\n                padding=0,\n                bias=False,\n                norm_cfg=dict(type='BN'),\n                act_cfg=dict(type='ReLU', inplace=False))\n            self.adapt_convs.append(adapt_conv)\n\n        # C2 is omitted according to the paper\n        extra_levels = num_outs - self.backbone_end_level + self.start_level\n\n        def build_concat_cell(with_input1_conv, with_input2_conv):\n            cell_conv_cfg = dict(\n 
                kernel_size=1, padding=0, bias=False, groups=out_channels)\n            return ConcatCell(\n                in_channels=out_channels,\n                out_channels=out_channels,\n                with_out_conv=True,\n                out_conv_cfg=cell_conv_cfg,\n                out_norm_cfg=dict(type='BN'),\n                out_conv_order=('norm', 'act', 'conv'),\n                with_input1_conv=with_input1_conv,\n                with_input2_conv=with_input2_conv,\n                input_conv_cfg=conv_cfg,\n                input_norm_cfg=norm_cfg,\n                upsample_mode='nearest')\n\n        # Denote c3=f0, c4=f1, c5=f2 for convenience\n        self.fpn = nn.ModuleDict()\n        self.fpn['c22_1'] = build_concat_cell(True, True)\n        self.fpn['c22_2'] = build_concat_cell(True, True)\n        self.fpn['c32'] = build_concat_cell(True, False)\n        self.fpn['c02'] = build_concat_cell(True, False)\n        self.fpn['c42'] = build_concat_cell(True, True)\n        self.fpn['c36'] = build_concat_cell(True, True)\n        self.fpn['c61'] = build_concat_cell(True, True)  # f9\n        self.extra_downsamples = nn.ModuleList()\n        for i in range(extra_levels):\n            extra_act_cfg = None if i == 0 \\\n                else dict(type='ReLU', inplace=False)\n            self.extra_downsamples.append(\n                ConvModule(\n                    out_channels,\n                    out_channels,\n                    3,\n                    stride=2,\n                    padding=1,\n                    act_cfg=extra_act_cfg,\n                    order=('act', 'norm', 'conv')))\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        feats = [\n            adapt_conv(inputs[i + self.start_level])\n            for i, adapt_conv in enumerate(self.adapt_convs)\n        ]\n\n        for (i, module_name) in enumerate(self.fpn):\n            idx_1, idx_2 = int(module_name[1]), int(module_name[2])\n            res = self.fpn[module_name](feats[idx_1], feats[idx_2])\n            feats.append(res)\n\n        ret = []\n        for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]):  # add P3, P4, P5\n            feats1, feats2 = feats[idx], feats[5]\n            feats2_resize = F.interpolate(\n                feats2,\n                size=feats1.size()[2:],\n                mode='bilinear',\n                align_corners=False)\n\n            feats_sum = feats1 + feats2_resize\n            ret.append(\n                F.interpolate(\n                    feats_sum,\n                    size=inputs[input_idx].size()[2:],\n                    mode='bilinear',\n                    align_corners=False))\n\n        for submodule in self.extra_downsamples:\n            ret.append(submodule(ret[-1]))\n\n        return tuple(ret)\n\n    def init_weights(self):\n        \"\"\"Initialize the weights of module.\"\"\"\n        super(NASFCOS_FPN, self).init_weights()\n        for module in self.fpn.values():\n            if hasattr(module, 'out_conv'):\n                caffe2_xavier_init(module.out_conv.conv)\n\n        for modules in [\n                self.adapt_convs.modules(),\n                self.extra_downsamples.modules()\n        ]:\n            for module in modules:\n                if isinstance(module, nn.Conv2d):\n                    caffe2_xavier_init(module)\n"
  },
  {
    "path": "mmdet/models/necks/pafpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import auto_fp16\n\nfrom ..builder import NECKS\nfrom .fpn import FPN\n\n\n@NECKS.register_module()\nclass PAFPN(FPN):\n    \"\"\"Path Aggregation Network for Instance Segmentation.\n\n    This is an implementation of the `PAFPN in Path Aggregation Network\n    <https://arxiv.org/abs/1803.01534>`_.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_outs (int): Number of output scales.\n        start_level (int): Index of the start input backbone level used to\n            build the feature pyramid. Default: 0.\n        end_level (int): Index of the end input backbone level (exclusive) to\n            build the feature pyramid. Default: -1, which means the last level.\n        add_extra_convs (bool | str): If bool, it decides whether to add conv\n            layers on top of the original feature maps. Default to False.\n            If True, it is equivalent to `add_extra_convs='on_input'`.\n            If str, it specifies the source feature map of the extra convs.\n            Only the following options are allowed\n\n            - 'on_input': Last feat map of neck inputs (i.e. backbone feature).\n            - 'on_lateral':  Last feature map after lateral convs.\n            - 'on_output': The last output feature map after fpn convs.\n        relu_before_extra_convs (bool): Whether to apply relu before the extra\n            conv. Default: False.\n        no_norm_on_lateral (bool): Whether to apply norm on lateral.\n            Default: False.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Config dict for normalization layer. 
Default: None.\n        act_cfg (str): Config dict for activation layer in ConvModule.\n            Default: None.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_outs,\n                 start_level=0,\n                 end_level=-1,\n                 add_extra_convs=False,\n                 relu_before_extra_convs=False,\n                 no_norm_on_lateral=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=None,\n                 init_cfg=dict(\n                     type='Xavier', layer='Conv2d', distribution='uniform')):\n        super(PAFPN, self).__init__(\n            in_channels,\n            out_channels,\n            num_outs,\n            start_level,\n            end_level,\n            add_extra_convs,\n            relu_before_extra_convs,\n            no_norm_on_lateral,\n            conv_cfg,\n            norm_cfg,\n            act_cfg,\n            init_cfg=init_cfg)\n        # add extra bottom up pathway\n        self.downsample_convs = nn.ModuleList()\n        self.pafpn_convs = nn.ModuleList()\n        for i in range(self.start_level + 1, self.backbone_end_level):\n            d_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                stride=2,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n            pafpn_conv = ConvModule(\n                out_channels,\n                out_channels,\n                3,\n                padding=1,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg,\n                inplace=False)\n            self.downsample_convs.append(d_conv)\n            self.pafpn_convs.append(pafpn_conv)\n\n    @auto_fp16()\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # build laterals\n        laterals = [\n            lateral_conv(inputs[i + self.start_level])\n            for i, lateral_conv in enumerate(self.lateral_convs)\n        ]\n\n        # build top-down path\n        used_backbone_levels = len(laterals)\n        for i in range(used_backbone_levels - 1, 0, -1):\n            prev_shape = laterals[i - 1].shape[2:]\n            # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n            laterals[i - 1] = laterals[i - 1] + F.interpolate(\n                laterals[i], size=prev_shape, mode='nearest')\n\n        # build outputs\n        # part 1: from original levels\n        inter_outs = [\n            self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)\n        ]\n\n        # part 2: add bottom-up path\n        for i in range(0, used_backbone_levels - 1):\n            inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])\n\n        outs = []\n        outs.append(inter_outs[0])\n        outs.extend([\n            self.pafpn_convs[i - 1](inter_outs[i])\n            for i in range(1, used_backbone_levels)\n        ])\n\n        # part 3: add extra levels\n        if self.num_outs > len(outs):\n            # use max pool to get more levels on top of outputs\n            # (e.g., Faster R-CNN, Mask R-CNN)\n            if not self.add_extra_convs:\n                for i in range(self.num_outs - 
used_backbone_levels):\n                    outs.append(F.max_pool2d(outs[-1], 1, stride=2))\n            # add conv layers on top of original feature maps (RetinaNet)\n            else:\n                if self.add_extra_convs == 'on_input':\n                    orig = inputs[self.backbone_end_level - 1]\n                    outs.append(self.fpn_convs[used_backbone_levels](orig))\n                elif self.add_extra_convs == 'on_lateral':\n                    outs.append(self.fpn_convs[used_backbone_levels](\n                        laterals[-1]))\n                elif self.add_extra_convs == 'on_output':\n                    outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))\n                else:\n                    raise NotImplementedError\n                for i in range(used_backbone_levels + 1, self.num_outs):\n                    if self.relu_before_extra_convs:\n                        outs.append(self.fpn_convs[i](F.relu(outs[-1])))\n                    else:\n                        outs.append(self.fpn_convs[i](outs[-1]))\n        return tuple(outs)\n"
  },
  {
    "path": "mmdet/models/necks/rfp.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import constant_init, xavier_init\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom ..builder import NECKS, build_backbone\nfrom .fpn import FPN\n\n\nclass ASPP(BaseModule):\n    \"\"\"ASPP (Atrous Spatial Pyramid Pooling)\n\n    This is an implementation of the ASPP module used in DetectoRS\n    (https://arxiv.org/pdf/2006.02334.pdf)\n\n    Args:\n        in_channels (int): Number of input channels.\n        out_channels (int): Number of channels produced by this module\n        dilations (tuple[int]): Dilations of the four branches.\n            Default: (1, 3, 6, 1)\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 dilations=(1, 3, 6, 1),\n                 init_cfg=dict(type='Kaiming', layer='Conv2d')):\n        super().__init__(init_cfg)\n        assert dilations[-1] == 1\n        self.aspp = nn.ModuleList()\n        for dilation in dilations:\n            kernel_size = 3 if dilation > 1 else 1\n            padding = dilation if dilation > 1 else 0\n            conv = nn.Conv2d(\n                in_channels,\n                out_channels,\n                kernel_size=kernel_size,\n                stride=1,\n                dilation=dilation,\n                padding=padding,\n                bias=True)\n            self.aspp.append(conv)\n        self.gap = nn.AdaptiveAvgPool2d(1)\n\n    def forward(self, x):\n        avg_x = self.gap(x)\n        out = []\n        for aspp_idx in range(len(self.aspp)):\n            inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x\n            out.append(F.relu_(self.aspp[aspp_idx](inp)))\n        out[-1] = out[-1].expand_as(out[-2])\n        out = torch.cat(out, dim=1)\n        return out\n\n\n@NECKS.register_module()\nclass RFP(FPN):\n    \"\"\"RFP (Recursive Feature Pyramid)\n\n    This is an implementation of RFP in `DetectoRS\n    <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from standard FPN, the\n    input of RFP should be multi level features along with origin input image\n    of backbone.\n\n    Args:\n        rfp_steps (int): Number of unrolled steps of RFP.\n        rfp_backbone (dict): Configuration of the backbone for RFP.\n        aspp_out_channels (int): Number of output channels of ASPP module.\n        aspp_dilations (tuple[int]): Dilation rates of four branches.\n            Default: (1, 3, 6, 1)\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 rfp_steps,\n                 rfp_backbone,\n                 aspp_out_channels,\n                 aspp_dilations=(1, 3, 6, 1),\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super().__init__(init_cfg=init_cfg, **kwargs)\n        self.rfp_steps = rfp_steps\n        # Be careful! 
Pretrained weights cannot be loaded when using\n        # nn.ModuleList\n        self.rfp_modules = ModuleList()\n        for rfp_idx in range(1, rfp_steps):\n            rfp_module = build_backbone(rfp_backbone)\n            self.rfp_modules.append(rfp_module)\n        self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,\n                             aspp_dilations)\n        self.rfp_weight = nn.Conv2d(\n            self.out_channels,\n            1,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            bias=True)\n\n    def init_weights(self):\n        # Avoid using super().init_weights(), which may alter the default\n        # initialization of the modules in self.rfp_modules that have missing\n        # keys in the pretrained checkpoint.\n        for convs in [self.lateral_convs, self.fpn_convs]:\n            for m in convs.modules():\n                if isinstance(m, nn.Conv2d):\n                    xavier_init(m, distribution='uniform')\n        for rfp_idx in range(self.rfp_steps - 1):\n            self.rfp_modules[rfp_idx].init_weights()\n        constant_init(self.rfp_weight, 0)\n\n    def forward(self, inputs):\n        inputs = list(inputs)\n        assert len(inputs) == len(self.in_channels) + 1  # +1 for input image\n        img = inputs.pop(0)\n        # FPN forward\n        x = super().forward(tuple(inputs))\n        for rfp_idx in range(self.rfp_steps - 1):\n            rfp_feats = [x[0]] + list(\n                self.rfp_aspp(x[i]) for i in range(1, len(x)))\n            x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)\n            # FPN forward\n            x_idx = super().forward(x_idx)\n            x_new = []\n            for ft_idx in range(len(x_idx)):\n                add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))\n                x_new.append(add_weight * x_idx[ft_idx] +\n                             (1 - add_weight) * x[ft_idx])\n            x = x_new\n        return x\n"
  },
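The recursive step in `RFP.forward` fuses the re-computed features with those from the previous unrolled step through a learned, per-pixel gate. The fragment below is a self-contained sketch of just that gating, using plain PyTorch and the same zero-initialized 1x1 `rfp_weight` convolution.

```python
# Standalone sketch of the RFP gated fusion (plain PyTorch, no mmdet needed).
import torch
import torch.nn as nn

out_channels = 256
rfp_weight = nn.Conv2d(out_channels, 1, kernel_size=1, stride=1, padding=0)
nn.init.zeros_(rfp_weight.weight)              # same zero init as constant_init
nn.init.zeros_(rfp_weight.bias)

x_prev = torch.rand(1, out_channels, 32, 32)   # feature kept from step t-1
x_new = torch.rand(1, out_channels, 32, 32)    # feature recomputed at step t
add_weight = torch.sigmoid(rfp_weight(x_new))  # starts at 0.5 everywhere
fused = add_weight * x_new + (1 - add_weight) * x_prev  # blending as in forward()
```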
  {
    "path": "mmdet/models/necks/ssd_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\n@NECKS.register_module()\nclass SSDNeck(BaseModule):\n    \"\"\"Extra layers of SSD backbone to generate multi-scale feature maps.\n\n    Args:\n        in_channels (Sequence[int]): Number of input channels per scale.\n        out_channels (Sequence[int]): Number of output channels per scale.\n        level_strides (Sequence[int]): Stride of 3x3 conv per level.\n        level_paddings (Sequence[int]): Padding size of 3x3 conv per level.\n        l2_norm_scale (float|None): L2 normalization layer init scale.\n            If None, not use L2 normalization on the first input feature.\n        last_kernel_size (int): Kernel size of the last conv layer.\n            Default: 3.\n        use_depthwise (bool): Whether to use DepthwiseSeparableConv.\n            Default: False.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: None.\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU').\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 level_strides,\n                 level_paddings,\n                 l2_norm_scale=20.,\n                 last_kernel_size=3,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 act_cfg=dict(type='ReLU'),\n                 init_cfg=[\n                     dict(\n                         type='Xavier', distribution='uniform',\n                         layer='Conv2d'),\n                     dict(type='Constant', val=1, layer='BatchNorm2d'),\n                 ]):\n        super(SSDNeck, self).__init__(init_cfg)\n        assert len(out_channels) > len(in_channels)\n        assert len(out_channels) - len(in_channels) == len(level_strides)\n        assert len(level_strides) == len(level_paddings)\n        assert in_channels == out_channels[:len(in_channels)]\n\n        if l2_norm_scale:\n            self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)\n            self.init_cfg += [\n                dict(\n                    type='Constant',\n                    val=self.l2_norm.scale,\n                    override=dict(name='l2_norm'))\n            ]\n\n        self.extra_layers = nn.ModuleList()\n        extra_layer_channels = out_channels[len(in_channels):]\n        second_conv = DepthwiseSeparableConvModule if \\\n            use_depthwise else ConvModule\n\n        for i, (out_channel, stride, padding) in enumerate(\n                zip(extra_layer_channels, level_strides, level_paddings)):\n            kernel_size = last_kernel_size \\\n                if i == len(extra_layer_channels) - 1 else 3\n            per_lvl_convs = nn.Sequential(\n                ConvModule(\n                    out_channels[len(in_channels) - 1 + i],\n                    out_channel // 2,\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg),\n                second_conv(\n                    out_channel // 2,\n                    out_channel,\n                    kernel_size,\n                  
  stride=stride,\n                    padding=padding,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.extra_layers.append(per_lvl_convs)\n\n    def forward(self, inputs):\n        \"\"\"Forward function.\"\"\"\n        outs = [feat for feat in inputs]\n        if hasattr(self, 'l2_norm'):\n            outs[0] = self.l2_norm(outs[0])\n\n        feat = outs[-1]\n        for layer in self.extra_layers:\n            feat = layer(feat)\n            outs.append(feat)\n        return tuple(outs)\n\n\nclass L2Norm(nn.Module):\n\n    def __init__(self, n_dims, scale=20., eps=1e-10):\n        \"\"\"L2 normalization layer.\n\n        Args:\n            n_dims (int): Number of dimensions to be normalized\n            scale (float, optional): Defaults to 20..\n            eps (float, optional): Used to avoid division by zero.\n                Defaults to 1e-10.\n        \"\"\"\n        super(L2Norm, self).__init__()\n        self.n_dims = n_dims\n        self.weight = nn.Parameter(torch.Tensor(self.n_dims))\n        self.eps = eps\n        self.scale = scale\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        # normalization layer convert to FP32 in FP16 training\n        x_float = x.float()\n        norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps\n        return (self.weight[None, :, None, None].float().expand_as(x_float) *\n                x_float / norm).type_as(x)\n"
  },
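`SSDNeck` keeps the backbone features and appends progressively smaller maps through its extra layers. A usage sketch follows, assuming mmdet 2.x is installed; the channel/stride/padding values mirror the common SSD300-VGG16 configuration and are only meant to show how the pyramid is extended.

```python
# SSDNeck usage sketch (assumes mmdet 2.x; values follow the usual SSD300
# setting and are illustrative only).
import torch
from mmdet.models.necks import SSDNeck

neck = SSDNeck(
    in_channels=(512, 1024),
    out_channels=(512, 1024, 512, 256, 256, 256),
    level_strides=(2, 2, 1, 1),
    level_paddings=(1, 1, 0, 0))

feats = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))
outs = neck(feats)
print([tuple(o.shape) for o in outs])  # 6 maps: 38, 19, 10, 5, 3 and 1 pixels
```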
  {
    "path": "mmdet/models/necks/yolo_neck.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\n\n\nclass DetectionBlock(BaseModule):\n    \"\"\"Detection block in YOLO neck.\n\n    Let out_channels = n, the DetectionBlock contains:\n    Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.\n    The first 6 ConvLayers are formed the following way:\n        1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.\n    The Conv2D layer is 1x1x255.\n    Some block will have branch after the fifth ConvLayer.\n    The input channel is arbitrary (in_channels)\n\n    Args:\n        in_channels (int): The number of input channels.\n        out_channels (int): The number of output channels.\n        conv_cfg (dict): Config dict for convolution layer. Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 init_cfg=None):\n        super(DetectionBlock, self).__init__(init_cfg)\n        double_out_channels = out_channels * 2\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n        self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)\n        self.conv2 = ConvModule(\n            out_channels, double_out_channels, 3, padding=1, **cfg)\n        self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)\n        self.conv4 = ConvModule(\n            out_channels, double_out_channels, 3, padding=1, **cfg)\n        self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)\n\n    def forward(self, x):\n        tmp = self.conv1(x)\n        tmp = self.conv2(tmp)\n        tmp = self.conv3(tmp)\n        tmp = self.conv4(tmp)\n        out = self.conv5(tmp)\n        return out\n\n\n@NECKS.register_module()\nclass YOLOV3Neck(BaseModule):\n    \"\"\"The neck of YOLOV3.\n\n    It can be treated as a simplified version of FPN. It\n    will take the result from Darknet backbone and do some upsampling and\n    concatenation. It will finally output the detection result.\n\n    Note:\n        The input feats should be from top to bottom.\n            i.e., from high-lvl to low-lvl\n        But YOLOV3Neck will process them in reversed order.\n            i.e., from bottom (high-lvl) to top (low-lvl)\n\n    Args:\n        num_scales (int): The number of scales / stages.\n        in_channels (List[int]): The number of input channels per scale.\n        out_channels (List[int]): The number of output channels  per scale.\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None.\n        norm_cfg (dict, optional): Dictionary to construct and config norm\n            layer. 
Default: dict(type='BN', requires_grad=True)\n        act_cfg (dict, optional): Config dict for activation layer.\n            Default: dict(type='LeakyReLU', negative_slope=0.1).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_scales,\n                 in_channels,\n                 out_channels,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 act_cfg=dict(type='LeakyReLU', negative_slope=0.1),\n                 init_cfg=None):\n        super(YOLOV3Neck, self).__init__(init_cfg)\n        assert (num_scales == len(in_channels) == len(out_channels))\n        self.num_scales = num_scales\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        # shortcut\n        cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)\n\n        # To support arbitrary scales, the code looks awful, but it works.\n        # Better solution is welcomed.\n        self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)\n        for i in range(1, self.num_scales):\n            in_c, out_c = self.in_channels[i], self.out_channels[i]\n            inter_c = out_channels[i - 1]\n            self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))\n            # in_c + out_c : High-lvl feats will be cat with low-lvl feats\n            self.add_module(f'detect{i+1}',\n                            DetectionBlock(in_c + out_c, out_c, **cfg))\n\n    def forward(self, feats):\n        assert len(feats) == self.num_scales\n\n        # processed from bottom (high-lvl) to top (low-lvl)\n        outs = []\n        out = self.detect1(feats[-1])\n        outs.append(out)\n\n        for i, x in enumerate(reversed(feats[:-1])):\n            conv = getattr(self, f'conv{i+1}')\n            tmp = conv(out)\n\n            # Cat with low-lvl feats\n            tmp = F.interpolate(tmp, scale_factor=2)\n            tmp = torch.cat((tmp, x), 1)\n\n            detect = getattr(self, f'detect{i+2}')\n            out = detect(tmp)\n            outs.append(out)\n\n        return tuple(outs)\n"
  },
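A usage sketch of `YOLOV3Neck` follows, assuming mmdet 2.x is installed; the channel numbers mirror the usual Darknet-53 YOLOv3 setting. Note that `detect1` consumes the last element of `feats`, so the deepest (1024-channel) map goes last.

```python
# YOLOV3Neck usage sketch (assumes mmdet 2.x; shapes are illustrative only).
import torch
from mmdet.models.necks import YOLOV3Neck

neck = YOLOV3Neck(
    num_scales=3,
    in_channels=[1024, 512, 256],
    out_channels=[512, 256, 128])

feats = (torch.rand(1, 256, 64, 64),
         torch.rand(1, 512, 32, 32),
         torch.rand(1, 1024, 16, 16))   # deepest feature last
outs = neck(feats)
print([tuple(o.shape) for o in outs])   # (512,16,16), (256,32,32), (128,64,64)
```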
  {
    "path": "mmdet/models/necks/yolox_pafpn.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import NECKS\nfrom ..utils import CSPLayer\n\n\n@NECKS.register_module()\nclass YOLOXPAFPN(BaseModule):\n    \"\"\"Path Aggregation Network used in YOLOX.\n\n    Args:\n        in_channels (List[int]): Number of input channels per scale.\n        out_channels (int): Number of output channels (used at each scale)\n        num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3\n        use_depthwise (bool): Whether to depthwise separable convolution in\n            blocks. Default: False\n        upsample_cfg (dict): Config dict for interpolate layer.\n            Default: `dict(scale_factor=2, mode='nearest')`\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN')\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish')\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 num_csp_blocks=3,\n                 use_depthwise=False,\n                 upsample_cfg=dict(scale_factor=2, mode='nearest'),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=dict(\n                     type='Kaiming',\n                     layer='Conv2d',\n                     a=math.sqrt(5),\n                     distribution='uniform',\n                     mode='fan_in',\n                     nonlinearity='leaky_relu')):\n        super(YOLOXPAFPN, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n\n        # build top-down blocks\n        self.upsample = nn.Upsample(**upsample_cfg)\n        self.reduce_layers = nn.ModuleList()\n        self.top_down_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1, 0, -1):\n            self.reduce_layers.append(\n                ConvModule(\n                    in_channels[idx],\n                    in_channels[idx - 1],\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.top_down_blocks.append(\n                CSPLayer(\n                    in_channels[idx - 1] * 2,\n                    in_channels[idx - 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        # build bottom-up blocks\n        self.downsamples = nn.ModuleList()\n        self.bottom_up_blocks = nn.ModuleList()\n        for idx in range(len(in_channels) - 1):\n            self.downsamples.append(\n                conv(\n                    in_channels[idx],\n                    in_channels[idx],\n                    3,\n                    stride=2,\n                    padding=1,\n                 
   conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n            self.bottom_up_blocks.append(\n                CSPLayer(\n                    in_channels[idx] * 2,\n                    in_channels[idx + 1],\n                    num_blocks=num_csp_blocks,\n                    add_identity=False,\n                    use_depthwise=use_depthwise,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n        self.out_convs = nn.ModuleList()\n        for i in range(len(in_channels)):\n            self.out_convs.append(\n                ConvModule(\n                    in_channels[i],\n                    out_channels,\n                    1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    act_cfg=act_cfg))\n\n    def forward(self, inputs):\n        \"\"\"\n        Args:\n            inputs (tuple[Tensor]): input features.\n\n        Returns:\n            tuple[Tensor]: YOLOXPAFPN features.\n        \"\"\"\n        assert len(inputs) == len(self.in_channels)\n\n        # top-down path\n        inner_outs = [inputs[-1]]\n        for idx in range(len(self.in_channels) - 1, 0, -1):\n            feat_heigh = inner_outs[0]\n            feat_low = inputs[idx - 1]\n            feat_heigh = self.reduce_layers[len(self.in_channels) - 1 - idx](\n                feat_heigh)\n            inner_outs[0] = feat_heigh\n\n            upsample_feat = self.upsample(feat_heigh)\n\n            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](\n                torch.cat([upsample_feat, feat_low], 1))\n            inner_outs.insert(0, inner_out)\n\n        # bottom-up path\n        outs = [inner_outs[0]]\n        for idx in range(len(self.in_channels) - 1):\n            feat_low = outs[-1]\n            feat_height = inner_outs[idx + 1]\n            downsample_feat = self.downsamples[idx](feat_low)\n            out = self.bottom_up_blocks[idx](\n                torch.cat([downsample_feat, feat_height], 1))\n            outs.append(out)\n\n        # out convs\n        for idx, conv in enumerate(self.out_convs):\n            outs[idx] = conv(outs[idx])\n\n        return tuple(outs)\n"
  },
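A usage sketch of `YOLOXPAFPN` follows, assuming mmdet 2.x is installed; the channel layout mirrors the YOLOX-s setting (strides 8/16/32) and is illustrative only.

```python
# YOLOXPAFPN usage sketch (assumes mmdet 2.x; YOLOX-s-like channels).
import torch
from mmdet.models.necks import YOLOXPAFPN

neck = YOLOXPAFPN(
    in_channels=[128, 256, 512],
    out_channels=128,
    num_csp_blocks=1)

feats = (torch.rand(1, 128, 80, 80),
         torch.rand(1, 256, 40, 40),
         torch.rand(1, 512, 20, 20))
outs = neck(feats)
print([tuple(o.shape) for o in outs])  # all three levels now carry 128 channels
```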
  {
    "path": "mmdet/models/plugins/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .dropblock import DropBlock\nfrom .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder\nfrom .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder\n\n__all__ = [\n    'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder',\n    'MSDeformAttnPixelDecoder'\n]\n"
  },
  {
    "path": "mmdet/models/plugins/dropblock.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import PLUGIN_LAYERS\n\neps = 1e-6\n\n\n@PLUGIN_LAYERS.register_module()\nclass DropBlock(nn.Module):\n    \"\"\"Randomly drop some regions of feature maps.\n\n     Please refer to the method proposed in `DropBlock\n     <https://arxiv.org/abs/1810.12890>`_ for details.\n\n    Args:\n        drop_prob (float): The probability of dropping each block.\n        block_size (int): The size of dropped blocks.\n        warmup_iters (int): The drop probability will linearly increase\n            from `0` to `drop_prob` during the first `warmup_iters` iterations.\n            Default: 2000.\n    \"\"\"\n\n    def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):\n        super(DropBlock, self).__init__()\n        assert block_size % 2 == 1\n        assert 0 < drop_prob <= 1\n        assert warmup_iters >= 0\n        self.drop_prob = drop_prob\n        self.block_size = block_size\n        self.warmup_iters = warmup_iters\n        self.iter_cnt = 0\n\n    def forward(self, x):\n        \"\"\"\n        Args:\n            x (Tensor): Input feature map on which some areas will be randomly\n                dropped.\n\n        Returns:\n            Tensor: The tensor after DropBlock layer.\n        \"\"\"\n        if not self.training:\n            return x\n        self.iter_cnt += 1\n        N, C, H, W = list(x.shape)\n        gamma = self._compute_gamma((H, W))\n        mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)\n        mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))\n\n        mask = F.pad(mask, [self.block_size // 2] * 4, value=0)\n        mask = F.max_pool2d(\n            input=mask,\n            stride=(1, 1),\n            kernel_size=(self.block_size, self.block_size),\n            padding=self.block_size // 2)\n        mask = 1 - mask\n        x = x * mask * mask.numel() / (eps + mask.sum())\n        return x\n\n    def _compute_gamma(self, feat_size):\n        \"\"\"Compute the value of gamma according to paper. gamma is the\n        parameter of bernoulli distribution, which controls the number of\n        features to drop.\n\n        gamma = (drop_prob * fm_area) / (drop_area * keep_area)\n\n        Args:\n            feat_size (tuple[int, int]): The height and width of feature map.\n\n        Returns:\n            float: The value of gamma.\n        \"\"\"\n        gamma = (self.drop_prob * feat_size[0] * feat_size[1])\n        gamma /= ((feat_size[0] - self.block_size + 1) *\n                  (feat_size[1] - self.block_size + 1))\n        gamma /= (self.block_size**2)\n        factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /\n                  self.warmup_iters)\n        return gamma * factor\n\n    def extra_repr(self):\n        return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '\n                f'warmup_iters={self.warmup_iters}')\n"
  },
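`DropBlock` drops contiguous `block_size x block_size` regions with a Bernoulli rate `gamma` derived from `drop_prob`. The sketch below exercises the layer in training mode and works out `gamma` for one concrete feature size.

```python
# DropBlock usage sketch; for a 32x32 map with drop_prob=0.1 and block_size=5,
# gamma = 0.1 * 32 * 32 / ((32 - 5 + 1)**2 * 5**2) ≈ 0.0052 after warmup.
import torch
from mmdet.models.plugins import DropBlock

drop_block = DropBlock(drop_prob=0.1, block_size=5, warmup_iters=1)
drop_block.train()                      # DropBlock is a no-op in eval mode
x = torch.rand(2, 16, 32, 32)
out = drop_block(x)
print(out.shape, float((out == 0).float().mean()))  # fraction of dropped pixels
```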
  {
    "path": "mmdet/models/plugins/msdeformattn_pixel_decoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init,\n                      normal_init, xavier_init)\nfrom mmcv.cnn.bricks.transformer import (build_positional_encoding,\n                                         build_transformer_layer_sequence)\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom mmdet.core.anchor import MlvlPointGenerator\nfrom mmdet.models.utils.transformer import MultiScaleDeformableAttention\n\n\n@PLUGIN_LAYERS.register_module()\nclass MSDeformAttnPixelDecoder(BaseModule):\n    \"\"\"Pixel decoder with multi-scale deformable attention.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        strides (list[int] | tuple[int]): Output strides of feature from\n            backbone.\n        feat_channels (int): Number of channels for feature.\n        out_channels (int): Number of channels for output.\n        num_outs (int): Number of output scales.\n        norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer\n            encoder. Defaults to `DetrTransformerEncoder`.\n        positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer encoder position encoding. Defaults to\n            dict(type='SinePositionalEncoding', num_feats=128,\n            normalize=True).\n        init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels=[256, 512, 1024, 2048],\n                 strides=[4, 8, 16, 32],\n                 feat_channels=256,\n                 out_channels=256,\n                 num_outs=3,\n                 norm_cfg=dict(type='GN', num_groups=32),\n                 act_cfg=dict(type='ReLU'),\n                 encoder=dict(\n                     type='DetrTransformerEncoder',\n                     num_layers=6,\n                     transformerlayers=dict(\n                         type='BaseTransformerLayer',\n                         attn_cfgs=dict(\n                             type='MultiScaleDeformableAttention',\n                             embed_dims=256,\n                             num_heads=8,\n                             num_levels=3,\n                             num_points=4,\n                             im2col_step=64,\n                             dropout=0.0,\n                             batch_first=False,\n                             norm_cfg=None,\n                             init_cfg=None),\n                         feedforward_channels=1024,\n                         ffn_dropout=0.0,\n                         operation_order=('self_attn', 'norm', 'ffn', 'norm')),\n                     init_cfg=None),\n                 positional_encoding=dict(\n                     type='SinePositionalEncoding',\n                     num_feats=128,\n                     normalize=True),\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n        self.strides = strides\n        self.num_input_levels = len(in_channels)\n        self.num_encoder_levels = \\\n            encoder.transformerlayers.attn_cfgs.num_levels\n        assert 
self.num_encoder_levels >= 1, \\\n            'num_levels in attn_cfgs must be at least one'\n        input_conv_list = []\n        # from top to down (low to high resolution)\n        for i in range(self.num_input_levels - 1,\n                       self.num_input_levels - self.num_encoder_levels - 1,\n                       -1):\n            input_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                norm_cfg=norm_cfg,\n                act_cfg=None,\n                bias=True)\n            input_conv_list.append(input_conv)\n        self.input_convs = ModuleList(input_conv_list)\n\n        self.encoder = build_transformer_layer_sequence(encoder)\n        self.postional_encoding = build_positional_encoding(\n            positional_encoding)\n        # high resolution to low resolution\n        self.level_encoding = nn.Embedding(self.num_encoder_levels,\n                                           feat_channels)\n\n        # fpn-like structure\n        self.lateral_convs = ModuleList()\n        self.output_convs = ModuleList()\n        self.use_bias = norm_cfg is None\n        # from top to down (low to high resolution)\n        # fpn for the rest features that didn't pass in encoder\n        for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,\n                       -1):\n            lateral_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            output_conv = ConvModule(\n                feat_channels,\n                feat_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.lateral_convs.append(lateral_conv)\n            self.output_convs.append(output_conv)\n\n        self.mask_feature = Conv2d(\n            feat_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n        self.num_outs = num_outs\n        self.point_generator = MlvlPointGenerator(strides)\n\n    def init_weights(self):\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_encoder_levels):\n            xavier_init(\n                self.input_convs[i].conv,\n                gain=1,\n                bias=0,\n                distribution='uniform')\n\n        for i in range(0, self.num_input_levels - self.num_encoder_levels):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n\n        normal_init(self.level_encoding, mean=0, std=1)\n        for p in self.encoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_normal_(p)\n\n        # init_weights defined in MultiScaleDeformableAttention\n        for layer in self.encoder.layers:\n            for attn in layer.attentions:\n                if isinstance(attn, MultiScaleDeformableAttention):\n                    attn.init_weights()\n\n    def forward(self, feats):\n        \"\"\"\n        Args:\n            feats (list[Tensor]): Feature maps of each level. 
Each has\n                shape of (batch_size, c, h, w).\n\n        Returns:\n            tuple: A tuple containing the following:\n\n            - mask_feature (Tensor): shape (batch_size, c, h, w).\n            - multi_scale_features (list[Tensor]): Multi scale \\\n                    features, each in shape (batch_size, c, h, w).\n        \"\"\"\n        # generate padding mask for each level, for each image\n        batch_size = feats[0].shape[0]\n        encoder_input_list = []\n        padding_mask_list = []\n        level_positional_encoding_list = []\n        spatial_shapes = []\n        reference_points_list = []\n        for i in range(self.num_encoder_levels):\n            level_idx = self.num_input_levels - i - 1\n            feat = feats[level_idx]\n            feat_projected = self.input_convs[i](feat)\n            h, w = feat.shape[-2:]\n\n            # no padding\n            padding_mask_resized = feat.new_zeros(\n                (batch_size, ) + feat.shape[-2:], dtype=torch.bool)\n            pos_embed = self.postional_encoding(padding_mask_resized)\n            level_embed = self.level_encoding.weight[i]\n            level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed\n            # (h_i * w_i, 2)\n            reference_points = self.point_generator.single_level_grid_priors(\n                feat.shape[-2:], level_idx, device=feat.device)\n            # normalize\n            factor = feat.new_tensor([[w, h]]) * self.strides[level_idx]\n            reference_points = reference_points / factor\n\n            # shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c)\n            feat_projected = feat_projected.flatten(2).permute(2, 0, 1)\n            level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1)\n            padding_mask_resized = padding_mask_resized.flatten(1)\n\n            encoder_input_list.append(feat_projected)\n            padding_mask_list.append(padding_mask_resized)\n            level_positional_encoding_list.append(level_pos_embed)\n            spatial_shapes.append(feat.shape[-2:])\n            reference_points_list.append(reference_points)\n        # shape (batch_size, total_num_query),\n        # total_num_query=sum([., h_i * w_i,.])\n        padding_masks = torch.cat(padding_mask_list, dim=1)\n        # shape (total_num_query, batch_size, c)\n        encoder_inputs = torch.cat(encoder_input_list, dim=0)\n        level_positional_encodings = torch.cat(\n            level_positional_encoding_list, dim=0)\n        device = encoder_inputs.device\n        # shape (num_encoder_levels, 2), from low\n        # resolution to high resolution\n        spatial_shapes = torch.as_tensor(\n            spatial_shapes, dtype=torch.long, device=device)\n        # shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...)\n        level_start_index = torch.cat((spatial_shapes.new_zeros(\n            (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n        reference_points = torch.cat(reference_points_list, dim=0)\n        reference_points = reference_points[None, :, None].repeat(\n            batch_size, 1, self.num_encoder_levels, 1)\n        valid_radios = reference_points.new_ones(\n            (batch_size, self.num_encoder_levels, 2))\n        # shape (num_total_query, batch_size, c)\n        memory = self.encoder(\n            query=encoder_inputs,\n            key=None,\n            value=None,\n            query_pos=level_positional_encodings,\n            key_pos=None,\n            attn_masks=None,\n            key_padding_mask=None,\n            
query_key_padding_mask=padding_masks,\n            spatial_shapes=spatial_shapes,\n            reference_points=reference_points,\n            level_start_index=level_start_index,\n            valid_radios=valid_radios)\n        # (num_total_query, batch_size, c) -> (batch_size, c, num_total_query)\n        memory = memory.permute(1, 2, 0)\n\n        # from low resolution to high resolution\n        num_query_per_level = [e[0] * e[1] for e in spatial_shapes]\n        outs = torch.split(memory, num_query_per_level, dim=-1)\n        outs = [\n            x.reshape(batch_size, -1, spatial_shapes[i][0],\n                      spatial_shapes[i][1]) for i, x in enumerate(outs)\n        ]\n\n        for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,\n                       -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + F.interpolate(\n                outs[-1],\n                size=cur_feat.shape[-2:],\n                mode='bilinear',\n                align_corners=False)\n            y = self.output_convs[i](y)\n            outs.append(y)\n        multi_scale_features = outs[:self.num_outs]\n\n        mask_feature = self.mask_feature(outs[-1])\n        return mask_feature, multi_scale_features\n"
  },
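The encoder input built in `MSDeformAttnPixelDecoder.forward` is a flat concatenation of the selected levels; `spatial_shapes` and `level_start_index` are the bookkeeping that lets the deformable attention index back into each level. The standalone snippet below reproduces just that bookkeeping with made-up level sizes.

```python
# Standalone illustration of level_start_index, as computed in forward().
import torch

spatial_shapes = torch.as_tensor([[16, 16], [32, 32], [64, 64]],
                                 dtype=torch.long)
level_start_index = torch.cat((spatial_shapes.new_zeros((1, )),
                               spatial_shapes.prod(1).cumsum(0)[:-1]))
print(level_start_index)                              # tensor([   0,  256, 1280])
num_total_query = int(spatial_shapes.prod(1).sum())   # 256 + 1024 + 4096 = 5376
```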
  {
    "path": "mmdet/models/plugins/pixel_decoder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init\nfrom mmcv.cnn.bricks.transformer import (build_positional_encoding,\n                                         build_transformer_layer_sequence)\nfrom mmcv.runner import BaseModule, ModuleList\n\n\n@PLUGIN_LAYERS.register_module()\nclass PixelDecoder(BaseModule):\n    \"\"\"Pixel decoder with a structure like fpn.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        feat_channels (int): Number channels for feature.\n        out_channels (int): Number channels for output.\n        norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer\n            encoder.Defaults to None.\n        positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer encoder position encoding. Defaults to\n            dict(type='SinePositionalEncoding', num_feats=128,\n            normalize=True).\n        init_cfg (:obj:`mmcv.ConfigDict` | dict):  Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 feat_channels,\n                 out_channels,\n                 norm_cfg=dict(type='GN', num_groups=32),\n                 act_cfg=dict(type='ReLU'),\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.num_inputs = len(in_channels)\n        self.lateral_convs = ModuleList()\n        self.output_convs = ModuleList()\n        self.use_bias = norm_cfg is None\n        for i in range(0, self.num_inputs - 1):\n            lateral_conv = ConvModule(\n                in_channels[i],\n                feat_channels,\n                kernel_size=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=None)\n            output_conv = ConvModule(\n                feat_channels,\n                feat_channels,\n                kernel_size=3,\n                stride=1,\n                padding=1,\n                bias=self.use_bias,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.lateral_convs.append(lateral_conv)\n            self.output_convs.append(output_conv)\n\n        self.last_feat_conv = ConvModule(\n            in_channels[-1],\n            feat_channels,\n            kernel_size=3,\n            padding=1,\n            stride=1,\n            bias=self.use_bias,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.mask_feature = Conv2d(\n            feat_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n    def init_weights(self):\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_inputs - 2):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n        caffe2_xavier_init(self.last_feat_conv, bias=0)\n\n    def forward(self, feats, img_metas):\n        \"\"\"\n        Args:\n            feats (list[Tensor]): 
Feature maps of each level. Each has\n                shape of (batch_size, c, h, w).\n            img_metas (list[dict]): List of image information. Pass in\n                for creating more accurate padding mask. Not used here.\n\n        Returns:\n            tuple: a tuple containing the following:\n                - mask_feature (Tensor): Shape (batch_size, c, h, w).\n                - memory (Tensor): Output of last stage of backbone.\\\n                        Shape (batch_size, c, h, w).\n        \"\"\"\n        y = self.last_feat_conv(feats[-1])\n        for i in range(self.num_inputs - 2, -1, -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + \\\n                F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')\n            y = self.output_convs[i](y)\n\n        mask_feature = self.mask_feature(y)\n        memory = feats[-1]\n        return mask_feature, memory\n\n\n@PLUGIN_LAYERS.register_module()\nclass TransformerEncoderPixelDecoder(PixelDecoder):\n    \"\"\"Pixel decoder with transormer encoder inside.\n\n    Args:\n        in_channels (list[int] | tuple[int]): Number of channels in the\n            input feature maps.\n        feat_channels (int): Number channels for feature.\n        out_channels (int): Number channels for output.\n        norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.\n            Defaults to dict(type='GN', num_groups=32).\n        act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.\n            Defaults to dict(type='ReLU').\n        encoder (:obj:`mmcv.ConfigDict` | dict): Config for transorformer\n            encoder.Defaults to None.\n        positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for\n            transformer encoder position encoding. 
Defaults to\n            dict(type='SinePositionalEncoding', num_feats=128,\n            normalize=True).\n        init_cfg (:obj:`mmcv.ConfigDict` | dict):  Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 feat_channels,\n                 out_channels,\n                 norm_cfg=dict(type='GN', num_groups=32),\n                 act_cfg=dict(type='ReLU'),\n                 encoder=None,\n                 positional_encoding=dict(\n                     type='SinePositionalEncoding',\n                     num_feats=128,\n                     normalize=True),\n                 init_cfg=None):\n        super(TransformerEncoderPixelDecoder, self).__init__(\n            in_channels,\n            feat_channels,\n            out_channels,\n            norm_cfg,\n            act_cfg,\n            init_cfg=init_cfg)\n        self.last_feat_conv = None\n\n        self.encoder = build_transformer_layer_sequence(encoder)\n        self.encoder_embed_dims = self.encoder.embed_dims\n        assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \\\n            'tranformer encoder must equal to feat_channels({})'.format(\n                feat_channels, self.encoder_embed_dims)\n        self.positional_encoding = build_positional_encoding(\n            positional_encoding)\n        self.encoder_in_proj = Conv2d(\n            in_channels[-1], feat_channels, kernel_size=1)\n        self.encoder_out_proj = ConvModule(\n            feat_channels,\n            feat_channels,\n            kernel_size=3,\n            stride=1,\n            padding=1,\n            bias=self.use_bias,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n    def init_weights(self):\n        \"\"\"Initialize weights.\"\"\"\n        for i in range(0, self.num_inputs - 2):\n            caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)\n            caffe2_xavier_init(self.output_convs[i].conv, bias=0)\n\n        caffe2_xavier_init(self.mask_feature, bias=0)\n        caffe2_xavier_init(self.encoder_in_proj, bias=0)\n        caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)\n\n        for p in self.encoder.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n\n    def forward(self, feats, img_metas):\n        \"\"\"\n        Args:\n            feats (list[Tensor]): Feature maps of each level. Each has\n                shape of (batch_size, c, h, w).\n            img_metas (list[dict]): List of image information. 
Pass in\n                for creating more accurate padding mask.\n\n        Returns:\n            tuple: a tuple containing the following:\n                - mask_feature (Tensor): shape (batch_size, c, h, w).\n                - memory (Tensor): shape (batch_size, c, h, w).\n        \"\"\"\n        feat_last = feats[-1]\n        bs, c, h, w = feat_last.shape\n        input_img_h, input_img_w = img_metas[0]['batch_input_shape']\n        padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),\n                                          dtype=torch.float32)\n        for i in range(bs):\n            img_h, img_w, _ = img_metas[i]['img_shape']\n            padding_mask[i, :img_h, :img_w] = 0\n        padding_mask = F.interpolate(\n            padding_mask.unsqueeze(1),\n            size=feat_last.shape[-2:],\n            mode='nearest').to(torch.bool).squeeze(1)\n\n        pos_embed = self.positional_encoding(padding_mask)\n        feat_last = self.encoder_in_proj(feat_last)\n        # (batch_size, c, h, w) -> (num_queries, batch_size, c)\n        feat_last = feat_last.flatten(2).permute(2, 0, 1)\n        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)\n        # (batch_size, h, w) -> (batch_size, h*w)\n        padding_mask = padding_mask.flatten(1)\n        memory = self.encoder(\n            query=feat_last,\n            key=None,\n            value=None,\n            query_pos=pos_embed,\n            query_key_padding_mask=padding_mask)\n        # (num_queries, batch_size, c) -> (batch_size, c, h, w)\n        memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h,\n                                              w)\n        y = self.encoder_out_proj(memory)\n        for i in range(self.num_inputs - 2, -1, -1):\n            x = feats[i]\n            cur_feat = self.lateral_convs[i](x)\n            y = cur_feat + \\\n                F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')\n            y = self.output_convs[i](y)\n\n        mask_feature = self.mask_feature(y)\n        return mask_feature, memory\n"
  },
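`TransformerEncoderPixelDecoder.forward` derives its key padding mask from the per-image `img_shape` versus the padded `batch_input_shape`, then downsamples it to the coarsest feature map. A self-contained sketch of that mask construction (with hypothetical metas and shapes) follows.

```python
# Padding-mask construction sketch (hypothetical img_metas and feature shape).
import torch
import torch.nn.functional as F

img_metas = [dict(img_shape=(750, 1100, 3), batch_input_shape=(800, 1216))]
feat_last = torch.rand(1, 256, 25, 38)           # stride-32 feature of the pad
bs = feat_last.size(0)
input_h, input_w = img_metas[0]['batch_input_shape']
padding_mask = feat_last.new_ones((bs, input_h, input_w))
for i in range(bs):
    img_h, img_w, _ = img_metas[i]['img_shape']
    padding_mask[i, :img_h, :img_w] = 0          # 0 = valid pixels, 1 = padding
padding_mask = F.interpolate(
    padding_mask.unsqueeze(1), size=feat_last.shape[-2:],
    mode='nearest').to(torch.bool).squeeze(1)    # (bs, 25, 38) boolean mask
```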
  {
    "path": "mmdet/models/roi_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_roi_head import BaseRoIHead\nfrom .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,\n                         DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,\n                         Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)\nfrom .cascade_roi_head import CascadeRoIHead\nfrom .double_roi_head import DoubleHeadRoIHead\nfrom .dynamic_roi_head import DynamicRoIHead\nfrom .grid_roi_head import GridRoIHead\nfrom .htc_roi_head import HybridTaskCascadeRoIHead\nfrom .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,\n                         FusedSemanticHead, GlobalContextHead, GridHead,\n                         HTCMaskHead, MaskIoUHead, MaskPointHead,\n                         SCNetMaskHead, SCNetSemanticHead)\nfrom .mask_scoring_roi_head import MaskScoringRoIHead\nfrom .pisa_roi_head import PISARoIHead\nfrom .point_rend_roi_head import PointRendRoIHead\nfrom .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,\n                             SingleRoIExtractor)\nfrom .scnet_roi_head import SCNetRoIHead\nfrom .shared_heads import ResLayer\nfrom .sparse_roi_head import SparseRoIHead\nfrom .standard_roi_head import StandardRoIHead\nfrom .trident_roi_head import TridentRoIHead\n\n__all__ = [\n    'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',\n    'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',\n    'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',\n    'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',\n    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',\n    'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',\n    'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',\n    'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',\n    'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',\n    'FeatureRelayHead', 'GlobalContextHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/base_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nfrom mmcv.runner import BaseModule\n\nfrom ..builder import build_shared_head\n\n\nclass BaseRoIHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for RoIHeads.\"\"\"\n\n    def __init__(self,\n                 bbox_roi_extractor=None,\n                 bbox_head=None,\n                 mask_roi_extractor=None,\n                 mask_head=None,\n                 shared_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(BaseRoIHead, self).__init__(init_cfg)\n        self.train_cfg = train_cfg\n        self.test_cfg = test_cfg\n        if shared_head is not None:\n            shared_head.pretrained = pretrained\n            self.shared_head = build_shared_head(shared_head)\n\n        if bbox_head is not None:\n            self.init_bbox_head(bbox_roi_extractor, bbox_head)\n\n        if mask_head is not None:\n            self.init_mask_head(mask_roi_extractor, mask_head)\n\n        self.init_assigner_sampler()\n\n    @property\n    def with_bbox(self):\n        \"\"\"bool: whether the RoI head contains a `bbox_head`\"\"\"\n        return hasattr(self, 'bbox_head') and self.bbox_head is not None\n\n    @property\n    def with_mask(self):\n        \"\"\"bool: whether the RoI head contains a `mask_head`\"\"\"\n        return hasattr(self, 'mask_head') and self.mask_head is not None\n\n    @property\n    def with_shared_head(self):\n        \"\"\"bool: whether the RoI head contains a `shared_head`\"\"\"\n        return hasattr(self, 'shared_head') and self.shared_head is not None\n\n    @abstractmethod\n    def init_bbox_head(self):\n        \"\"\"Initialize ``bbox_head``\"\"\"\n        pass\n\n    @abstractmethod\n    def init_mask_head(self):\n        \"\"\"Initialize ``mask_head``\"\"\"\n        pass\n\n    @abstractmethod\n    def init_assigner_sampler(self):\n        \"\"\"Initialize assigner and sampler.\"\"\"\n        pass\n\n    @abstractmethod\n    def forward_train(self,\n                      x,\n                      img_meta,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      **kwargs):\n        \"\"\"Forward function during training.\"\"\"\n\n    async def async_simple_test(self,\n                                x,\n                                proposal_list,\n                                img_metas,\n                                proposals=None,\n                                rescale=False,\n                                **kwargs):\n        \"\"\"Asynchronized test function.\"\"\"\n        raise NotImplementedError\n\n    def simple_test(self,\n                    x,\n                    proposal_list,\n                    img_meta,\n                    proposals=None,\n                    rescale=False,\n                    **kwargs):\n        \"\"\"Test without augmentation.\"\"\"\n\n    def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n"
  },
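`BaseRoIHead` only fixes the interface; concrete heads must provide `init_bbox_head`, `init_mask_head`, `init_assigner_sampler` and `forward_train`. The stub below is a hypothetical subclass (not part of mmdet) showing the minimum needed to make the class instantiable.

```python
# Hypothetical minimal RoI head, only to illustrate the abstract interface.
from mmdet.models.roi_heads import BaseRoIHead


class MinimalRoIHead(BaseRoIHead):
    """Hypothetical example head; it only satisfies the abstract hooks."""

    def init_bbox_head(self, bbox_roi_extractor, bbox_head):
        pass

    def init_mask_head(self, mask_roi_extractor, mask_head):
        pass

    def init_assigner_sampler(self):
        pass

    def forward_train(self, x, img_meta, proposal_list, gt_bboxes, gt_labels,
                      gt_bboxes_ignore=None, gt_masks=None, **kwargs):
        return dict()  # a real head would return its loss dict here


head = MinimalRoIHead()            # instantiable because all hooks are defined
print(head.with_bbox, head.with_mask, head.with_shared_head)  # False False False
```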
  {
    "path": "mmdet/models/roi_heads/bbox_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bbox_head import BBoxHead\nfrom .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,\n                               Shared4Conv1FCBBoxHead)\nfrom .dii_head import DIIHead\nfrom .double_bbox_head import DoubleConvFCBBoxHead\nfrom .sabl_head import SABLHead\nfrom .scnet_bbox_head import SCNetBBoxHead\n\n__all__ = [\n    'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',\n    'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',\n    'SCNetBBoxHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.runner import BaseModule, auto_fp16, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import build_bbox_coder, multi_apply, multiclass_nms\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.utils import build_linear_layer\n\n\n@HEADS.register_module()\nclass BBoxHead(BaseModule):\n    \"\"\"Simplest RoI head, with only two fc layers for classification and\n    regression respectively.\"\"\"\n\n    def __init__(self,\n                 with_avg_pool=False,\n                 with_cls=True,\n                 with_reg=True,\n                 roi_feat_size=7,\n                 in_channels=256,\n                 num_classes=80,\n                 bbox_coder=dict(\n                     type='DeltaXYWHBBoxCoder',\n                     clip_border=True,\n                     target_means=[0., 0., 0., 0.],\n                     target_stds=[0.1, 0.1, 0.2, 0.2]),\n                 reg_class_agnostic=False,\n                 reg_decoded_bbox=False,\n                 reg_predictor_cfg=dict(type='Linear'),\n                 cls_predictor_cfg=dict(type='Linear'),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0),\n                 loss_bbox=dict(\n                     type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n                 init_cfg=None):\n        super(BBoxHead, self).__init__(init_cfg)\n        assert with_cls or with_reg\n        self.with_avg_pool = with_avg_pool\n        self.with_cls = with_cls\n        self.with_reg = with_reg\n        self.roi_feat_size = _pair(roi_feat_size)\n        self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n        self.in_channels = in_channels\n        self.num_classes = num_classes\n        self.reg_class_agnostic = reg_class_agnostic\n        self.reg_decoded_bbox = reg_decoded_bbox\n        self.reg_predictor_cfg = reg_predictor_cfg\n        self.cls_predictor_cfg = cls_predictor_cfg\n        self.fp16_enabled = False\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox = build_loss(loss_bbox)\n\n        in_channels = self.in_channels\n        if self.with_avg_pool:\n            self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n        else:\n            in_channels *= self.roi_feat_area\n        if self.with_cls:\n            # need to add background class\n            if self.custom_cls_channels:\n                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n            else:\n                cls_channels = num_classes + 1\n            self.fc_cls = build_linear_layer(\n                self.cls_predictor_cfg,\n                in_features=in_channels,\n                out_features=cls_channels)\n        if self.with_reg:\n            out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes\n            self.fc_reg = build_linear_layer(\n                self.reg_predictor_cfg,\n                in_features=in_channels,\n                out_features=out_dim_reg)\n        self.debug_imgs = None\n        if init_cfg is None:\n            self.init_cfg = []\n            if self.with_cls:\n                self.init_cfg += [\n                    dict(\n                        type='Normal', std=0.01, 
override=dict(name='fc_cls'))\n                ]\n            if self.with_reg:\n                self.init_cfg += [\n                    dict(\n                        type='Normal', std=0.001, override=dict(name='fc_reg'))\n                ]\n\n    @property\n    def custom_cls_channels(self):\n        return getattr(self.loss_cls, 'custom_cls_channels', False)\n\n    @property\n    def custom_activation(self):\n        return getattr(self.loss_cls, 'custom_activation', False)\n\n    @property\n    def custom_accuracy(self):\n        return getattr(self.loss_cls, 'custom_accuracy', False)\n\n    @auto_fp16()\n    def forward(self, x):\n        if self.with_avg_pool:\n            if x.numel() > 0:\n                x = self.avg_pool(x)\n                x = x.view(x.size(0), -1)\n            else:\n                # avg_pool does not support empty tensor,\n                # so use torch.mean instead it\n                x = torch.mean(x, dim=(-1, -2))\n        cls_score = self.fc_cls(x) if self.with_cls else None\n        bbox_pred = self.fc_reg(x) if self.with_reg else None\n        return cls_score, bbox_pred\n\n    def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,\n                           pos_gt_labels, cfg):\n        \"\"\"Calculate the ground truth for proposals in the single image\n        according to the sampling results.\n\n        Args:\n            pos_bboxes (Tensor): Contains all the positive boxes,\n                has shape (num_pos, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            neg_bboxes (Tensor): Contains all the negative boxes,\n                has shape (num_neg, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_bboxes (Tensor): Contains gt_boxes for\n                all positive samples, has shape (num_pos, 4),\n                the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_labels (Tensor): Contains gt_labels for\n                all positive samples, has shape (num_pos, ).\n            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals\n            in a single image. 
Containing the following Tensors:\n\n                - labels(Tensor): Gt_labels for all proposals, has\n                  shape (num_proposals,).\n                - label_weights(Tensor): Labels_weights for all\n                  proposals, has shape (num_proposals,).\n                - bbox_targets(Tensor):Regression target for all\n                  proposals, has shape (num_proposals, 4), the\n                  last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n                - bbox_weights(Tensor):Regression weights for all\n                  proposals, has shape (num_proposals, 4).\n        \"\"\"\n        num_pos = pos_bboxes.size(0)\n        num_neg = neg_bboxes.size(0)\n        num_samples = num_pos + num_neg\n\n        # original implementation uses new_zeros since BG are set to be 0\n        # now use empty & fill because BG cat_id = num_classes,\n        # FG cat_id = [0, num_classes-1]\n        labels = pos_bboxes.new_full((num_samples, ),\n                                     self.num_classes,\n                                     dtype=torch.long)\n        label_weights = pos_bboxes.new_zeros(num_samples)\n        bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n        bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n        if num_pos > 0:\n            labels[:num_pos] = pos_gt_labels\n            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n            label_weights[:num_pos] = pos_weight\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    pos_bboxes, pos_gt_bboxes)\n            else:\n                # When the regression loss (e.g. `IouLoss`, `GIouLoss`)\n                # is applied directly on the decoded bounding boxes, both\n                # the predicted boxes and regression targets should be with\n                # absolute coordinate format.\n                pos_bbox_targets = pos_gt_bboxes\n            bbox_targets[:num_pos, :] = pos_bbox_targets\n            bbox_weights[:num_pos, :] = 1\n        if num_neg > 0:\n            label_weights[-num_neg:] = 1.0\n\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_targets(self,\n                    sampling_results,\n                    gt_bboxes,\n                    gt_labels,\n                    rcnn_train_cfg,\n                    concat=True):\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Almost the same as the implementation in bbox_head, we passed\n        additional parameters pos_inds_list and neg_inds_list to\n        `_get_target_single` function.\n\n        Args:\n            sampling_results (List[obj:SamplingResults]): Assign results of\n                all images in a batch after sampling.\n            gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,\n                each tensor has shape (num_gt, 4),  the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            gt_labels (list[Tensor]): Gt_labels of all images in a batch,\n                each tensor has shape (num_gt,).\n            rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following list of Tensors:\n\n                - labels (list[Tensor],Tensor): 
Gt_labels for all\n                  proposals in a batch, each tensor in list has\n                  shape (num_proposals,) when `concat=False`, otherwise\n                  just a single tensor has shape (num_all_proposals,).\n                - label_weights (list[Tensor]): Labels_weights for\n                  all proposals in a batch, each tensor in list has\n                  shape (num_proposals,) when `concat=False`, otherwise\n                  just a single tensor has shape (num_all_proposals,).\n                - bbox_targets (list[Tensor],Tensor): Regression target\n                  for all proposals in a batch, each tensor in list\n                  has shape (num_proposals, 4) when `concat=False`,\n                  otherwise just a single tensor has shape\n                  (num_all_proposals, 4), the last dimension 4 represents\n                  [tl_x, tl_y, br_x, br_y].\n                - bbox_weights (list[tensor],Tensor): Regression weights for\n                  all proposals in a batch, each tensor in list has shape\n                  (num_proposals, 4) when `concat=False`, otherwise just a\n                  single tensor has shape (num_all_proposals, 4).\n        \"\"\"\n        pos_bboxes_list = [res.pos_bboxes for res in sampling_results]\n        neg_bboxes_list = [res.neg_bboxes for res in sampling_results]\n        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n        labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n            self._get_target_single,\n            pos_bboxes_list,\n            neg_bboxes_list,\n            pos_gt_bboxes_list,\n            pos_gt_labels_list,\n            cfg=rcnn_train_cfg)\n\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bbox_targets = torch.cat(bbox_targets, 0)\n            bbox_weights = torch.cat(bbox_weights, 0)\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n    def loss(self,\n             cls_score,\n             bbox_pred,\n             rois,\n             labels,\n             label_weights,\n             bbox_targets,\n             bbox_weights,\n             reduction_override=None):\n        losses = dict()\n        if cls_score is not None:\n            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n            if cls_score.numel() > 0:\n                loss_cls_ = self.loss_cls(\n                    cls_score,\n                    labels,\n                    label_weights,\n                    avg_factor=avg_factor,\n                    reduction_override=reduction_override)\n                if isinstance(loss_cls_, dict):\n                    losses.update(loss_cls_)\n                else:\n                    losses['loss_cls'] = loss_cls_\n                if self.custom_activation:\n                    acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n                    losses.update(acc_)\n                else:\n                    losses['acc'] = accuracy(cls_score, labels)\n        if bbox_pred is not None:\n            bg_class_ind = self.num_classes\n            # 0~self.num_classes-1 are FG, self.num_classes is BG\n            pos_inds = (labels >= 0) & (labels < bg_class_ind)\n            # do not perform bounding box regression for BG anymore.\n            if pos_inds.any():\n                if 
self.reg_decoded_bbox:\n                    # When the regression loss (e.g. `IouLoss`,\n                    # `GIouLoss`, `DIouLoss`) is applied directly on\n                    # the decoded bounding boxes, it decodes the\n                    # already encoded coordinates to absolute format.\n                    bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n                if self.reg_class_agnostic:\n                    pos_bbox_pred = bbox_pred.view(\n                        bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]\n                else:\n                    pos_bbox_pred = bbox_pred.view(\n                        bbox_pred.size(0), -1,\n                        4)[pos_inds.type(torch.bool),\n                           labels[pos_inds.type(torch.bool)]]\n                losses['loss_bbox'] = self.loss_bbox(\n                    pos_bbox_pred,\n                    bbox_targets[pos_inds.type(torch.bool)],\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=bbox_targets.size(0),\n                    reduction_override=reduction_override)\n            else:\n                losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n        return losses\n\n    @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n    def get_bboxes(self,\n                   rois,\n                   cls_score,\n                   bbox_pred,\n                   img_shape,\n                   scale_factor,\n                   rescale=False,\n                   cfg=None):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).\n                last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n            cls_score (Tensor): Box scores, has shape\n                (num_boxes, num_classes + 1).\n            bbox_pred (Tensor, optional): Box energies / deltas.\n                has shape (num_boxes, num_classes * 4).\n            img_shape (Sequence[int], optional): Maximum bounds for boxes,\n                specifies (H, W, C) or (H, W).\n            scale_factor (ndarray): Scale factor of the\n               image arrange as (w_scale, h_scale, w_scale, h_scale).\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n            cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None\n\n        Returns:\n            tuple[Tensor, Tensor]:\n                First tensor is `det_bboxes`, has the shape\n                (num_boxes, 5) and last\n                dimension 5 represent (tl_x, tl_y, br_x, br_y, score).\n                Second tensor is the labels with shape (num_boxes, ).\n        \"\"\"\n\n        # some loss (Seesaw loss..) may have custom activation\n        if self.custom_cls_channels:\n            scores = self.loss_cls.get_activation(cls_score)\n        else:\n            scores = F.softmax(\n                cls_score, dim=-1) if cls_score is not None else None\n        # bbox_pred would be None in some detector when with_reg is False,\n        # e.g. 
Grid R-CNN.\n        if bbox_pred is not None:\n            bboxes = self.bbox_coder.decode(\n                rois[..., 1:], bbox_pred, max_shape=img_shape)\n        else:\n            bboxes = rois[:, 1:].clone()\n            if img_shape is not None:\n                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n\n        if rescale and bboxes.size(0) > 0:\n            scale_factor = bboxes.new_tensor(scale_factor)\n            bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(\n                bboxes.size()[0], -1)\n\n        if cfg is None:\n            return bboxes, scores\n        else:\n            det_bboxes, det_labels = multiclass_nms(bboxes, scores,\n                                                    cfg.score_thr, cfg.nms,\n                                                    cfg.max_per_img)\n\n            return det_bboxes, det_labels\n\n    @force_fp32(apply_to=('bbox_preds', ))\n    def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n        \"\"\"Refine bboxes during training.\n\n        Args:\n            rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n                and bs is the sampled RoIs per image. The first column is\n                the image id and the next 4 columns are x1, y1, x2, y2.\n            labels (Tensor): Shape (n*bs, ).\n            bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).\n            pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n                is a gt bbox.\n            img_metas (list[dict]): Meta info of each image.\n\n        Returns:\n            list[Tensor]: Refined bboxes of each image in a mini-batch.\n\n        Example:\n            >>> # xdoctest: +REQUIRES(module:kwarray)\n            >>> import kwarray\n            >>> import numpy as np\n            >>> from mmdet.core.bbox.demodata import random_boxes\n            >>> self = BBoxHead(reg_class_agnostic=True)\n            >>> n_roi = 2\n            >>> n_img = 4\n            >>> scale = 512\n            >>> rng = np.random.RandomState(0)\n            >>> img_metas = [{'img_shape': (scale, scale)}\n            ...              for _ in range(n_img)]\n            >>> # Create rois in the expected format\n            >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n            >>> img_ids = torch.randint(0, n_img, (n_roi,))\n            >>> img_ids = img_ids.float()\n            >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n            >>> # Create other args\n            >>> labels = torch.randint(0, 2, (n_roi,)).long()\n            >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n            >>> # For each image, pretend random positive boxes are gts\n            >>> is_label_pos = (labels.numpy() > 0).astype(np.int)\n            >>> lbl_per_img = kwarray.group_items(is_label_pos,\n            ...                                   img_ids.numpy())\n            >>> pos_per_img = [sum(lbl_per_img.get(gid, []))\n            ...                
for gid in range(n_img)]\n            >>> pos_is_gts = [\n            >>>     torch.randint(0, 2, (npos,)).byte().sort(\n            >>>         descending=True)[0]\n            >>>     for npos in pos_per_img\n            >>> ]\n            >>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,\n            >>>                    pos_is_gts, img_metas)\n            >>> print(bboxes_list)\n        \"\"\"\n        img_ids = rois[:, 0].long().unique(sorted=True)\n        assert img_ids.numel() <= len(img_metas)\n\n        bboxes_list = []\n        for i in range(len(img_metas)):\n            inds = torch.nonzero(\n                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n            num_rois = inds.numel()\n\n            bboxes_ = rois[inds, 1:]\n            label_ = labels[inds]\n            bbox_pred_ = bbox_preds[inds]\n            img_meta_ = img_metas[i]\n            pos_is_gts_ = pos_is_gts[i]\n\n            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n                                           img_meta_)\n\n            # filter gt bboxes\n            pos_keep = 1 - pos_is_gts_\n            keep_inds = pos_is_gts_.new_ones(num_rois)\n            keep_inds[:len(pos_is_gts_)] = pos_keep\n\n            bboxes_list.append(bboxes[keep_inds.type(torch.bool)])\n\n        return bboxes_list\n\n    @force_fp32(apply_to=('bbox_pred', ))\n    def regress_by_class(self, rois, label, bbox_pred, img_meta):\n        \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n        Args:\n            rois (Tensor): Rois from `rpn_head` or last stage\n                `bbox_head`, has shape (num_proposals, 4) or\n                (num_proposals, 5).\n            label (Tensor): Only used when `self.reg_class_agnostic`\n                is False, has shape (num_proposals, ).\n            bbox_pred (Tensor): Regression prediction of\n                current stage `bbox_head`. When `self.reg_class_agnostic`\n                is False, it has shape (n, num_classes * 4), otherwise\n                it has shape (n, 4).\n            img_meta (dict): Image meta info.\n\n        Returns:\n            Tensor: Regressed bboxes, the same shape as input rois.\n        \"\"\"\n\n        assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)\n\n        if not self.reg_class_agnostic:\n            label = label * 4\n            inds = torch.stack((label, label + 1, label + 2, label + 3), 1)\n            bbox_pred = torch.gather(bbox_pred, 1, inds)\n        assert bbox_pred.size(1) == 4\n\n        max_shape = img_meta['img_shape']\n\n        if rois.size(1) == 4:\n            new_rois = self.bbox_coder.decode(\n                rois, bbox_pred, max_shape=max_shape)\n        else:\n            bboxes = self.bbox_coder.decode(\n                rois[:, 1:], bbox_pred, max_shape=max_shape)\n            new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n        return new_rois\n\n    def onnx_export(self,\n                    rois,\n                    cls_score,\n                    bbox_pred,\n                    img_shape,\n                    cfg=None,\n                    **kwargs):\n        \"\"\"Transform network output for a batch into bbox predictions.\n\n        Args:\n            rois (Tensor): Boxes to be transformed.\n                Has shape (B, num_boxes, 5)\n            cls_score (Tensor): Box scores. 
has shape\n                (B, num_boxes, num_classes + 1), 1 represent the background.\n            bbox_pred (Tensor, optional): Box energies / deltas for,\n                has shape (B, num_boxes, num_classes * 4) when.\n            img_shape (torch.Tensor): Shape of image.\n            cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None\n\n        Returns:\n            tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]\n                and class labels of shape [N, num_det].\n        \"\"\"\n\n        assert rois.ndim == 3, 'Only support export two stage ' \\\n                               'model to ONNX ' \\\n                               'with batch dimension. '\n        if self.custom_cls_channels:\n            scores = self.loss_cls.get_activation(cls_score)\n        else:\n            scores = F.softmax(\n                cls_score, dim=-1) if cls_score is not None else None\n\n        if bbox_pred is not None:\n            bboxes = self.bbox_coder.decode(\n                rois[..., 1:], bbox_pred, max_shape=img_shape)\n        else:\n            bboxes = rois[..., 1:].clone()\n            if img_shape is not None:\n                max_shape = bboxes.new_tensor(img_shape)[..., :2]\n                min_xy = bboxes.new_tensor(0)\n                max_xy = torch.cat(\n                    [max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)\n                bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)\n                bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)\n\n        # Replace multiclass_nms with ONNX::NonMaxSuppression in deployment\n        from mmdet.core.export import add_dummy_nms_for_onnx\n        max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class',\n                                                 cfg.max_per_img)\n        iou_threshold = cfg.nms.get('iou_threshold', 0.5)\n        score_threshold = cfg.score_thr\n        nms_pre = cfg.get('deploy_nms_pre', -1)\n\n        scores = scores[..., :self.num_classes]\n        if self.reg_class_agnostic:\n            return add_dummy_nms_for_onnx(\n                bboxes,\n                scores,\n                max_output_boxes_per_class,\n                iou_threshold,\n                score_threshold,\n                pre_top_k=nms_pre,\n                after_top_k=cfg.max_per_img)\n        else:\n            batch_size = scores.shape[0]\n            labels = torch.arange(\n                self.num_classes, dtype=torch.long).to(scores.device)\n            labels = labels.view(1, 1, -1).expand_as(scores)\n            labels = labels.reshape(batch_size, -1)\n            scores = scores.reshape(batch_size, -1)\n            bboxes = bboxes.reshape(batch_size, -1, 4)\n\n            max_size = torch.max(img_shape)\n            # Offset bboxes of each class so that bboxes of different labels\n            #  do not overlap.\n            offsets = (labels * max_size + 1).unsqueeze(2)\n            bboxes_for_nms = bboxes + offsets\n\n            batch_dets, labels = add_dummy_nms_for_onnx(\n                bboxes_for_nms,\n                scores.unsqueeze(2),\n                max_output_boxes_per_class,\n                iou_threshold,\n                score_threshold,\n                pre_top_k=nms_pre,\n                after_top_k=cfg.max_per_img,\n                labels=labels)\n            # Offset the bboxes back after dummy nms.\n            offsets = (labels * max_size + 1).unsqueeze(2)\n            # Indexing + inplace operation fails with dynamic shape in ONNX\n            # original 
style: batch_dets[..., :4] -= offsets\n            bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5]\n            bboxes -= offsets\n            batch_dets = torch.cat([bboxes, scores], dim=2)\n            return batch_dets, labels\n"
  },
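# --- Illustrative usage note (not part of the repository) -------------------
# A minimal sketch of the label/target convention used by
# BBoxHead._get_target_single above: foreground proposals keep a class id in
# [0, num_classes - 1] and background proposals get cat_id == num_classes.
# It assumes the default BBoxHead constructor arguments (DeltaXYWHBBoxCoder,
# CrossEntropyLoss) and a working mmdet/mmcv installation; `cfg` here is a
# hypothetical stand-in for the usual rcnn train_cfg.
import torch
from mmcv.utils import ConfigDict
from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead

head = BBoxHead(num_classes=3)
pos_bboxes = torch.tensor([[10., 10., 50., 50.], [20., 20., 80., 90.]])
pos_gt_bboxes = torch.tensor([[12., 8., 48., 55.], [18., 25., 82., 88.]])
pos_gt_labels = torch.tensor([0, 2])
neg_bboxes = torch.tensor([[100., 100., 150., 160.]])
cfg = ConfigDict(pos_weight=-1)  # pos_weight <= 0 falls back to 1.0

labels, label_weights, bbox_targets, bbox_weights = head._get_target_single(
    pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels, cfg)
print(labels)         # tensor([0, 2, 3]) -> the trailing 3 is the background class
print(label_weights)  # tensor([1., 1., 1.])
print(bbox_weights)   # only the two positive rows are filled with ones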
  {
    "path": "mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import build_linear_layer\nfrom .bbox_head import BBoxHead\n\n\n@HEADS.register_module()\nclass ConvFCBBoxHead(BBoxHead):\n    r\"\"\"More general bbox head, with shared conv and fc layers and two optional\n    separated branches.\n\n    .. code-block:: none\n\n                                    /-> cls convs -> cls fcs -> cls\n        shared convs -> shared fcs\n                                    \\-> reg convs -> reg fcs -> reg\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_shared_convs=0,\n                 num_shared_fcs=0,\n                 num_cls_convs=0,\n                 num_cls_fcs=0,\n                 num_reg_convs=0,\n                 num_reg_fcs=0,\n                 conv_out_channels=256,\n                 fc_out_channels=1024,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None,\n                 *args,\n                 **kwargs):\n        super(ConvFCBBoxHead, self).__init__(\n            *args, init_cfg=init_cfg, **kwargs)\n        assert (num_shared_convs + num_shared_fcs + num_cls_convs +\n                num_cls_fcs + num_reg_convs + num_reg_fcs > 0)\n        if num_cls_convs > 0 or num_reg_convs > 0:\n            assert num_shared_fcs == 0\n        if not self.with_cls:\n            assert num_cls_convs == 0 and num_cls_fcs == 0\n        if not self.with_reg:\n            assert num_reg_convs == 0 and num_reg_fcs == 0\n        self.num_shared_convs = num_shared_convs\n        self.num_shared_fcs = num_shared_fcs\n        self.num_cls_convs = num_cls_convs\n        self.num_cls_fcs = num_cls_fcs\n        self.num_reg_convs = num_reg_convs\n        self.num_reg_fcs = num_reg_fcs\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        # add shared convs and fcs\n        self.shared_convs, self.shared_fcs, last_layer_dim = \\\n            self._add_conv_fc_branch(\n                self.num_shared_convs, self.num_shared_fcs, self.in_channels,\n                True)\n        self.shared_out_channels = last_layer_dim\n\n        # add cls specific branch\n        self.cls_convs, self.cls_fcs, self.cls_last_dim = \\\n            self._add_conv_fc_branch(\n                self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)\n\n        # add reg specific branch\n        self.reg_convs, self.reg_fcs, self.reg_last_dim = \\\n            self._add_conv_fc_branch(\n                self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)\n\n        if self.num_shared_fcs == 0 and not self.with_avg_pool:\n            if self.num_cls_fcs == 0:\n                self.cls_last_dim *= self.roi_feat_area\n            if self.num_reg_fcs == 0:\n                self.reg_last_dim *= self.roi_feat_area\n\n        self.relu = nn.ReLU(inplace=True)\n        # reconstruct fc_cls and fc_reg since input channels are changed\n        if self.with_cls:\n            if self.custom_cls_channels:\n                cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n            else:\n                cls_channels = self.num_classes + 1\n            self.fc_cls = build_linear_layer(\n                self.cls_predictor_cfg,\n                in_features=self.cls_last_dim,\n                
out_features=cls_channels)\n        if self.with_reg:\n            out_dim_reg = (4 if self.reg_class_agnostic else 4 *\n                           self.num_classes)\n            self.fc_reg = build_linear_layer(\n                self.reg_predictor_cfg,\n                in_features=self.reg_last_dim,\n                out_features=out_dim_reg)\n\n        if init_cfg is None:\n            # when init_cfg is None,\n            # It has been set to\n            # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],\n            #  [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]\n            # after `super(ConvFCBBoxHead, self).__init__()`\n            # we only need to append additional configuration\n            # for `shared_fcs`, `cls_fcs` and `reg_fcs`\n            self.init_cfg += [\n                dict(\n                    type='Xavier',\n                    distribution='uniform',\n                    override=[\n                        dict(name='shared_fcs'),\n                        dict(name='cls_fcs'),\n                        dict(name='reg_fcs')\n                    ])\n            ]\n\n    def _add_conv_fc_branch(self,\n                            num_branch_convs,\n                            num_branch_fcs,\n                            in_channels,\n                            is_shared=False):\n        \"\"\"Add shared or separable branch.\n\n        convs -> avg pool (optional) -> fcs\n        \"\"\"\n        last_layer_dim = in_channels\n        # add branch specific conv layers\n        branch_convs = nn.ModuleList()\n        if num_branch_convs > 0:\n            for i in range(num_branch_convs):\n                conv_in_channels = (\n                    last_layer_dim if i == 0 else self.conv_out_channels)\n                branch_convs.append(\n                    ConvModule(\n                        conv_in_channels,\n                        self.conv_out_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n            last_layer_dim = self.conv_out_channels\n        # add branch specific fc layers\n        branch_fcs = nn.ModuleList()\n        if num_branch_fcs > 0:\n            # for shared branch, only consider self.with_avg_pool\n            # for separated branches, also consider self.num_shared_fcs\n            if (is_shared\n                    or self.num_shared_fcs == 0) and not self.with_avg_pool:\n                last_layer_dim *= self.roi_feat_area\n            for i in range(num_branch_fcs):\n                fc_in_channels = (\n                    last_layer_dim if i == 0 else self.fc_out_channels)\n                branch_fcs.append(\n                    nn.Linear(fc_in_channels, self.fc_out_channels))\n            last_layer_dim = self.fc_out_channels\n        return branch_convs, branch_fcs, last_layer_dim\n\n    def forward(self, x):\n        # shared part\n        if self.num_shared_convs > 0:\n            for conv in self.shared_convs:\n                x = conv(x)\n\n        if self.num_shared_fcs > 0:\n            if self.with_avg_pool:\n                x = self.avg_pool(x)\n\n            x = x.flatten(1)\n\n            for fc in self.shared_fcs:\n                x = self.relu(fc(x))\n        # separate branches\n        x_cls = x\n        x_reg = x\n\n        for conv in self.cls_convs:\n            x_cls = conv(x_cls)\n        if x_cls.dim() > 2:\n            if self.with_avg_pool:\n                x_cls 
= self.avg_pool(x_cls)\n            x_cls = x_cls.flatten(1)\n        for fc in self.cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n\n        for conv in self.reg_convs:\n            x_reg = conv(x_reg)\n        if x_reg.dim() > 2:\n            if self.with_avg_pool:\n                x_reg = self.avg_pool(x_reg)\n            x_reg = x_reg.flatten(1)\n        for fc in self.reg_fcs:\n            x_reg = self.relu(fc(x_reg))\n\n        cls_score = self.fc_cls(x_cls) if self.with_cls else None\n        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n        return cls_score, bbox_pred\n\n\n@HEADS.register_module()\nclass Shared2FCBBoxHead(ConvFCBBoxHead):\n\n    def __init__(self, fc_out_channels=1024, *args, **kwargs):\n        super(Shared2FCBBoxHead, self).__init__(\n            num_shared_convs=0,\n            num_shared_fcs=2,\n            num_cls_convs=0,\n            num_cls_fcs=0,\n            num_reg_convs=0,\n            num_reg_fcs=0,\n            fc_out_channels=fc_out_channels,\n            *args,\n            **kwargs)\n\n\n@HEADS.register_module()\nclass Shared4Conv1FCBBoxHead(ConvFCBBoxHead):\n\n    def __init__(self, fc_out_channels=1024, *args, **kwargs):\n        super(Shared4Conv1FCBBoxHead, self).__init__(\n            num_shared_convs=4,\n            num_shared_fcs=1,\n            num_cls_convs=0,\n            num_cls_fcs=0,\n            num_reg_convs=0,\n            num_reg_fcs=0,\n            fc_out_channels=fc_out_channels,\n            *args,\n            **kwargs)\n"
  },
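# --- Illustrative usage note (not part of the repository) -------------------
# A quick smoke test of the shape flow through Shared2FCBBoxHead: a 256x7x7
# RoI feature is flattened, passed through the two shared fcs, and then split
# into classification and regression outputs. It assumes the BBoxHead defaults
# (in_channels=256, roi_feat_size=7, reg_class_agnostic=False) and a working
# mmdet installation; the tensor sizes are placeholders.
import torch
from mmdet.models.roi_heads.bbox_heads.convfc_bbox_head import Shared2FCBBoxHead

head = Shared2FCBBoxHead(num_classes=10)
head.eval()

roi_feats = torch.rand(16, 256, 7, 7)  # e.g. 16 proposals after 7x7 RoIAlign
with torch.no_grad():
    cls_score, bbox_pred = head(roi_feats)
print(cls_score.shape)  # torch.Size([16, 11]) -> num_classes + 1
print(bbox_pred.shape)  # torch.Size([16, 40]) -> 4 deltas per class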
  {
    "path": "mmdet/models/roi_heads/bbox_heads/dii_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import (bias_init_with_prob, build_activation_layer,\n                      build_norm_layer)\nfrom mmcv.cnn.bricks.transformer import FFN, MultiheadAttention\nfrom mmcv.runner import auto_fp16, force_fp32\n\nfrom mmdet.core import multi_apply\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.dense_heads.atss_head import reduce_mean\nfrom mmdet.models.losses import accuracy\nfrom mmdet.models.utils import build_transformer\nfrom .bbox_head import BBoxHead\n\n\n@HEADS.register_module()\nclass DIIHead(BBoxHead):\n    r\"\"\"Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object\n    Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\n\n    Args:\n        num_classes (int): Number of class in dataset.\n            Defaults to 80.\n        num_ffn_fcs (int): The number of fully-connected\n            layers in FFNs. Defaults to 2.\n        num_heads (int): The hidden dimension of FFNs.\n            Defaults to 8.\n        num_cls_fcs (int): The number of fully-connected\n            layers in classification subnet. Defaults to 1.\n        num_reg_fcs (int): The number of fully-connected\n            layers in regression subnet. Defaults to 3.\n        feedforward_channels (int): The hidden dimension\n            of FFNs. Defaults to 2048\n        in_channels (int): Hidden_channels of MultiheadAttention.\n            Defaults to 256.\n        dropout (float): Probability of drop the channel.\n            Defaults to 0.0\n        ffn_act_cfg (dict): The activation config for FFNs.\n        dynamic_conv_cfg (dict): The convolution config\n            for DynamicConv.\n        loss_iou (dict): The config for iou or giou loss.\n\n    \"\"\"\n\n    def __init__(self,\n                 num_classes=80,\n                 num_ffn_fcs=2,\n                 num_heads=8,\n                 num_cls_fcs=1,\n                 num_reg_fcs=3,\n                 feedforward_channels=2048,\n                 in_channels=256,\n                 dropout=0.0,\n                 ffn_act_cfg=dict(type='ReLU', inplace=True),\n                 dynamic_conv_cfg=dict(\n                     type='DynamicConv',\n                     in_channels=256,\n                     feat_channels=64,\n                     out_channels=256,\n                     input_feat_shape=7,\n                     act_cfg=dict(type='ReLU', inplace=True),\n                     norm_cfg=dict(type='LN')),\n                 loss_iou=dict(type='GIoULoss', loss_weight=2.0),\n                 init_cfg=None,\n                 **kwargs):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(DIIHead, self).__init__(\n            num_classes=num_classes,\n            reg_decoded_bbox=True,\n            reg_class_agnostic=True,\n            init_cfg=init_cfg,\n            **kwargs)\n        self.loss_iou = build_loss(loss_iou)\n        self.in_channels = in_channels\n        self.fp16_enabled = False\n        self.attention = MultiheadAttention(in_channels, num_heads, dropout)\n        self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n        self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)\n        self.instance_interactive_conv_dropout = nn.Dropout(dropout)\n        self.instance_interactive_conv_norm = build_norm_layer(\n            
dict(type='LN'), in_channels)[1]\n\n        self.ffn = FFN(\n            in_channels,\n            feedforward_channels,\n            num_ffn_fcs,\n            act_cfg=ffn_act_cfg,\n            dropout=dropout)\n        self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]\n\n        self.cls_fcs = nn.ModuleList()\n        for _ in range(num_cls_fcs):\n            self.cls_fcs.append(\n                nn.Linear(in_channels, in_channels, bias=False))\n            self.cls_fcs.append(\n                build_norm_layer(dict(type='LN'), in_channels)[1])\n            self.cls_fcs.append(\n                build_activation_layer(dict(type='ReLU', inplace=True)))\n\n        # over load the self.fc_cls in BBoxHead\n        if self.loss_cls.use_sigmoid:\n            self.fc_cls = nn.Linear(in_channels, self.num_classes)\n        else:\n            self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)\n\n        self.reg_fcs = nn.ModuleList()\n        for _ in range(num_reg_fcs):\n            self.reg_fcs.append(\n                nn.Linear(in_channels, in_channels, bias=False))\n            self.reg_fcs.append(\n                build_norm_layer(dict(type='LN'), in_channels)[1])\n            self.reg_fcs.append(\n                build_activation_layer(dict(type='ReLU', inplace=True)))\n        # over load the self.fc_cls in BBoxHead\n        self.fc_reg = nn.Linear(in_channels, 4)\n\n        assert self.reg_class_agnostic, 'DIIHead only ' \\\n            'suppport `reg_class_agnostic=True` '\n        assert self.reg_decoded_bbox, 'DIIHead only ' \\\n            'suppport `reg_decoded_bbox=True`'\n\n    def init_weights(self):\n        \"\"\"Use xavier initialization for all weight parameter and set\n        classification head bias as a specific value when use focal loss.\"\"\"\n        super(DIIHead, self).init_weights()\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n            else:\n                # adopt the default initialization for\n                # the weight and bias of the layer norm\n                pass\n        if self.loss_cls.use_sigmoid:\n            bias_init = bias_init_with_prob(0.01)\n            nn.init.constant_(self.fc_cls.bias, bias_init)\n\n    @auto_fp16()\n    def forward(self, roi_feat, proposal_feat):\n        \"\"\"Forward function of Dynamic Instance Interactive Head.\n\n        Args:\n            roi_feat (Tensor): Roi-pooling features with shape\n                (batch_size*num_proposals, feature_dimensions,\n                pooling_h , pooling_w).\n            proposal_feat (Tensor): Intermediate feature get from\n                diihead in last stage, has shape\n                (batch_size, num_proposals, feature_dimensions)\n\n          Returns:\n                tuple[Tensor]: Usually a tuple of classification scores\n                and bbox prediction and a intermediate feature.\n\n                    - cls_scores (Tensor): Classification scores for\n                      all proposals, has shape\n                      (batch_size, num_proposals, num_classes).\n                    - bbox_preds (Tensor): Box energies / deltas for\n                      all proposals, has shape\n                      (batch_size, num_proposals, 4).\n                    - obj_feat (Tensor): Object feature before classification\n                      and regression subnet, has shape\n                      (batch_size, num_proposal, feature_dimensions).\n        \"\"\"\n        N, num_proposals = 
proposal_feat.shape[:2]\n\n        # Self attention\n        proposal_feat = proposal_feat.permute(1, 0, 2)\n        proposal_feat = self.attention_norm(self.attention(proposal_feat))\n        attn_feats = proposal_feat.permute(1, 0, 2)\n\n        # instance interactive\n        proposal_feat = attn_feats.reshape(-1, self.in_channels)\n        proposal_feat_iic = self.instance_interactive_conv(\n            proposal_feat, roi_feat)\n        proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(\n            proposal_feat_iic)\n        obj_feat = self.instance_interactive_conv_norm(proposal_feat)\n\n        # FFN\n        obj_feat = self.ffn_norm(self.ffn(obj_feat))\n\n        cls_feat = obj_feat\n        reg_feat = obj_feat\n\n        for cls_layer in self.cls_fcs:\n            cls_feat = cls_layer(cls_feat)\n        for reg_layer in self.reg_fcs:\n            reg_feat = reg_layer(reg_feat)\n\n        cls_score = self.fc_cls(cls_feat).view(\n            N, num_proposals, self.num_classes\n            if self.loss_cls.use_sigmoid else self.num_classes + 1)\n        bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)\n\n        return cls_score, bbox_delta, obj_feat.view(\n            N, num_proposals, self.in_channels), attn_feats\n\n    @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n    def loss(self,\n             cls_score,\n             bbox_pred,\n             labels,\n             label_weights,\n             bbox_targets,\n             bbox_weights,\n             imgs_whwh=None,\n             reduction_override=None,\n             **kwargs):\n        \"\"\"\"Loss function of DIIHead, get loss of all images.\n\n        Args:\n            cls_score (Tensor): Classification prediction\n                results of all class, has shape\n                (batch_size * num_proposals_single_image, num_classes)\n            bbox_pred (Tensor): Regression prediction results,\n                has shape\n                (batch_size * num_proposals_single_image, 4), the last\n                dimension 4 represents [tl_x, tl_y, br_x, br_y].\n            labels (Tensor): Label of each proposals, has shape\n                (batch_size * num_proposals_single_image\n            label_weights (Tensor): Classification loss\n                weight of each proposals, has shape\n                (batch_size * num_proposals_single_image\n            bbox_targets (Tensor): Regression targets of each\n                proposals, has shape\n                (batch_size * num_proposals_single_image, 4),\n                the last dimension 4 represents\n                [tl_x, tl_y, br_x, br_y].\n            bbox_weights (Tensor): Regression loss weight of each\n                proposals's coordinate, has shape\n                (batch_size * num_proposals_single_image, 4),\n            imgs_whwh (Tensor): imgs_whwh (Tensor): Tensor with\\\n                shape (batch_size, num_proposals, 4), the last\n                dimension means\n                [img_width,img_height, img_width, img_height].\n            reduction_override (str, optional): The reduction\n                method used to override the original reduction\n                method of the loss. Options are \"none\",\n                \"mean\" and \"sum\". 
Defaults to None,\n\n            Returns:\n                dict[str, Tensor]: Dictionary of loss components\n        \"\"\"\n        losses = dict()\n        bg_class_ind = self.num_classes\n        # note in spare rcnn num_gt == num_pos\n        pos_inds = (labels >= 0) & (labels < bg_class_ind)\n        num_pos = pos_inds.sum().float()\n        avg_factor = reduce_mean(num_pos)\n        if cls_score is not None:\n            if cls_score.numel() > 0:\n                losses['loss_cls'] = self.loss_cls(\n                    cls_score,\n                    labels,\n                    label_weights,\n                    avg_factor=avg_factor,\n                    reduction_override=reduction_override)\n                losses['pos_acc'] = accuracy(cls_score[pos_inds],\n                                             labels[pos_inds])\n        if bbox_pred is not None:\n            # 0~self.num_classes-1 are FG, self.num_classes is BG\n            # do not perform bounding box regression for BG anymore.\n            if pos_inds.any():\n                pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),\n                                                  4)[pos_inds.type(torch.bool)]\n                imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),\n                                              4)[pos_inds.type(torch.bool)]\n                losses['loss_bbox'] = self.loss_bbox(\n                    pos_bbox_pred / imgs_whwh,\n                    bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=avg_factor)\n                losses['loss_iou'] = self.loss_iou(\n                    pos_bbox_pred,\n                    bbox_targets[pos_inds.type(torch.bool)],\n                    bbox_weights[pos_inds.type(torch.bool)],\n                    avg_factor=avg_factor)\n            else:\n                losses['loss_bbox'] = bbox_pred.sum() * 0\n                losses['loss_iou'] = bbox_pred.sum() * 0\n        return losses\n\n    def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,\n                           pos_gt_bboxes, pos_gt_labels, cfg):\n        \"\"\"Calculate the ground truth for proposals in the single image\n        according to the sampling results.\n\n        Almost the same as the implementation in `bbox_head`,\n        we add pos_inds and neg_inds to select positive and\n        negative samples instead of selecting the first num_pos\n        as positive samples.\n\n        Args:\n            pos_inds (Tensor): The length is equal to the\n                positive sample numbers contain all index\n                of the positive sample in the origin proposal set.\n            neg_inds (Tensor): The length is equal to the\n                negative sample numbers contain all index\n                of the negative sample in the origin proposal set.\n            pos_bboxes (Tensor): Contains all the positive boxes,\n                has shape (num_pos, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            neg_bboxes (Tensor): Contains all the negative boxes,\n                has shape (num_neg, 4), the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_bboxes (Tensor): Contains gt_boxes for\n                all positive samples, has shape (num_pos, 4),\n                the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            pos_gt_labels (Tensor): Contains gt_labels for\n 
               all positive samples, has shape (num_pos, ).\n            cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following Tensors:\n\n                - labels(Tensor): Gt_labels for all proposals, has\n                  shape (num_proposals,).\n                - label_weights(Tensor): Labels_weights for all proposals, has\n                  shape (num_proposals,).\n                - bbox_targets(Tensor):Regression target for all proposals, has\n                  shape (num_proposals, 4), the last dimension 4\n                  represents [tl_x, tl_y, br_x, br_y].\n                - bbox_weights(Tensor):Regression weights for all proposals,\n                  has shape (num_proposals, 4).\n        \"\"\"\n        num_pos = pos_bboxes.size(0)\n        num_neg = neg_bboxes.size(0)\n        num_samples = num_pos + num_neg\n\n        # original implementation uses new_zeros since BG are set to be 0\n        # now use empty & fill because BG cat_id = num_classes,\n        # FG cat_id = [0, num_classes-1]\n        labels = pos_bboxes.new_full((num_samples, ),\n                                     self.num_classes,\n                                     dtype=torch.long)\n        label_weights = pos_bboxes.new_zeros(num_samples)\n        bbox_targets = pos_bboxes.new_zeros(num_samples, 4)\n        bbox_weights = pos_bboxes.new_zeros(num_samples, 4)\n        if num_pos > 0:\n            labels[pos_inds] = pos_gt_labels\n            pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n            label_weights[pos_inds] = pos_weight\n            if not self.reg_decoded_bbox:\n                pos_bbox_targets = self.bbox_coder.encode(\n                    pos_bboxes, pos_gt_bboxes)\n            else:\n                pos_bbox_targets = pos_gt_bboxes\n            bbox_targets[pos_inds, :] = pos_bbox_targets\n            bbox_weights[pos_inds, :] = 1\n        if num_neg > 0:\n            label_weights[neg_inds] = 1.0\n\n        return labels, label_weights, bbox_targets, bbox_weights\n\n    def get_targets(self,\n                    sampling_results,\n                    gt_bboxes,\n                    gt_labels,\n                    rcnn_train_cfg,\n                    concat=True):\n        \"\"\"Calculate the ground truth for all samples in a batch according to\n        the sampling_results.\n\n        Almost the same as the implementation in bbox_head, we passed\n        additional parameters pos_inds_list and neg_inds_list to\n        `_get_target_single` function.\n\n        Args:\n            sampling_results (List[obj:SamplingResults]): Assign results of\n                all images in a batch after sampling.\n            gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,\n                each tensor has shape (num_gt, 4),  the last dimension 4\n                represents [tl_x, tl_y, br_x, br_y].\n            gt_labels (list[Tensor]): Gt_labels of all images in a batch,\n                each tensor has shape (num_gt,).\n            rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.\n            concat (bool): Whether to concatenate the results of all\n                the images in a single batch.\n\n        Returns:\n            Tuple[Tensor]: Ground truth for proposals in a single image.\n            Containing the following list of Tensors:\n\n                - labels (list[Tensor],Tensor): Gt_labels for all\n                  proposals in a 
batch, each tensor in list has\n                  shape (num_proposals,) when `concat=False`, otherwise just\n                  a single tensor has shape (num_all_proposals,).\n                - label_weights (list[Tensor]): Labels_weights for\n                  all proposals in a batch, each tensor in list has shape\n                  (num_proposals,) when `concat=False`, otherwise just a\n                  single tensor has shape (num_all_proposals,).\n                - bbox_targets (list[Tensor],Tensor): Regression target\n                  for all proposals in a batch, each tensor in list has\n                  shape (num_proposals, 4) when `concat=False`, otherwise\n                  just a single tensor has shape (num_all_proposals, 4),\n                  the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n                - bbox_weights (list[tensor],Tensor): Regression weights for\n                  all proposals in a batch, each tensor in list has shape\n                  (num_proposals, 4) when `concat=False`, otherwise just a\n                  single tensor has shape (num_all_proposals, 4).\n        \"\"\"\n        pos_inds_list = [res.pos_inds for res in sampling_results]\n        neg_inds_list = [res.neg_inds for res in sampling_results]\n        pos_bboxes_list = [res.pos_bboxes for res in sampling_results]\n        neg_bboxes_list = [res.neg_bboxes for res in sampling_results]\n        pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n        labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n            self._get_target_single,\n            pos_inds_list,\n            neg_inds_list,\n            pos_bboxes_list,\n            neg_bboxes_list,\n            pos_gt_bboxes_list,\n            pos_gt_labels_list,\n            cfg=rcnn_train_cfg)\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bbox_targets = torch.cat(bbox_targets, 0)\n            bbox_weights = torch.cat(bbox_weights, 0)\n        return labels, label_weights, bbox_targets, bbox_weights\n"
  },
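# --- Illustrative usage note (not part of the repository) -------------------
# A minimal sketch of how DIIHead._get_target_single differs from the base
# BBoxHead: positives are scattered back to their original proposal indices
# via pos_inds instead of occupying the first num_pos slots, and (because
# reg_decoded_bbox=True) the regression targets are the gt boxes themselves.
# Assumes a working mmdet installation; `cfg` is a hypothetical stand-in for
# the rcnn train_cfg.
import torch
from mmcv.utils import ConfigDict
from mmdet.models.roi_heads.bbox_heads.dii_head import DIIHead

head = DIIHead(num_classes=3)
pos_inds = torch.tensor([0, 2])   # positives sit at proposal indices 0 and 2
neg_inds = torch.tensor([1, 3])
pos_bboxes = torch.rand(2, 4) * 100
neg_bboxes = torch.rand(2, 4) * 100
pos_gt_bboxes = torch.rand(2, 4) * 100
pos_gt_labels = torch.tensor([1, 0])
cfg = ConfigDict(pos_weight=-1)

labels, label_weights, bbox_targets, bbox_weights = head._get_target_single(
    pos_inds, neg_inds, pos_bboxes, neg_bboxes, pos_gt_bboxes, pos_gt_labels,
    cfg)
print(labels)  # tensor([1, 3, 0, 3]) -> positives keep their indices, 3 is BG
assert torch.equal(bbox_targets[pos_inds], pos_gt_bboxes)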
  {
    "path": "mmdet/models/roi_heads/bbox_heads/double_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, ModuleList\n\nfrom mmdet.models.backbones.resnet import Bottleneck\nfrom mmdet.models.builder import HEADS\nfrom .bbox_head import BBoxHead\n\n\nclass BasicResBlock(BaseModule):\n    \"\"\"Basic residual block.\n\n    This block is a little different from the block in the ResNet backbone.\n    The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.\n\n    Args:\n        in_channels (int): Channels of the input feature map.\n        out_channels (int): Channels of the output feature map.\n        conv_cfg (dict): The config dict for convolution layers.\n        norm_cfg (dict): The config dict for normalization layers.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 init_cfg=None):\n        super(BasicResBlock, self).__init__(init_cfg)\n\n        # main path\n        self.conv1 = ConvModule(\n            in_channels,\n            in_channels,\n            kernel_size=3,\n            padding=1,\n            bias=False,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg)\n        self.conv2 = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size=1,\n            bias=False,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        # identity path\n        self.conv_identity = ConvModule(\n            in_channels,\n            out_channels,\n            kernel_size=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n        self.relu = nn.ReLU(inplace=True)\n\n    def forward(self, x):\n        identity = x\n\n        x = self.conv1(x)\n        x = self.conv2(x)\n\n        identity = self.conv_identity(identity)\n        out = x + identity\n\n        out = self.relu(out)\n        return out\n\n\n@HEADS.register_module()\nclass DoubleConvFCBBoxHead(BBoxHead):\n    r\"\"\"Bbox head used in Double-Head R-CNN\n\n    .. 
code-block:: none\n\n                                          /-> cls\n                      /-> shared convs ->\n                                          \\-> reg\n        roi features\n                                          /-> cls\n                      \\-> shared fc    ->\n                                          \\-> reg\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_convs=0,\n                 num_fcs=0,\n                 conv_out_channels=1024,\n                 fc_out_channels=1024,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 init_cfg=dict(\n                     type='Normal',\n                     override=[\n                         dict(type='Normal', name='fc_cls', std=0.01),\n                         dict(type='Normal', name='fc_reg', std=0.001),\n                         dict(\n                             type='Xavier',\n                             name='fc_branch',\n                             distribution='uniform')\n                     ]),\n                 **kwargs):\n        kwargs.setdefault('with_avg_pool', True)\n        super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs)\n        assert self.with_avg_pool\n        assert num_convs > 0\n        assert num_fcs > 0\n        self.num_convs = num_convs\n        self.num_fcs = num_fcs\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n\n        # increase the channel of input features\n        self.res_block = BasicResBlock(self.in_channels,\n                                       self.conv_out_channels)\n\n        # add conv heads\n        self.conv_branch = self._add_conv_branch()\n        # add fc heads\n        self.fc_branch = self._add_fc_branch()\n\n        out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes\n        self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)\n\n        self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)\n        self.relu = nn.ReLU(inplace=True)\n\n    def _add_conv_branch(self):\n        \"\"\"Add the fc branch which consists of a sequential of conv layers.\"\"\"\n        branch_convs = ModuleList()\n        for i in range(self.num_convs):\n            branch_convs.append(\n                Bottleneck(\n                    inplanes=self.conv_out_channels,\n                    planes=self.conv_out_channels // 4,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        return branch_convs\n\n    def _add_fc_branch(self):\n        \"\"\"Add the fc branch which consists of a sequential of fc layers.\"\"\"\n        branch_fcs = ModuleList()\n        for i in range(self.num_fcs):\n            fc_in_channels = (\n                self.in_channels *\n                self.roi_feat_area if i == 0 else self.fc_out_channels)\n            branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))\n        return branch_fcs\n\n    def forward(self, x_cls, x_reg):\n        # conv head\n        x_conv = self.res_block(x_reg)\n\n        for conv in self.conv_branch:\n            x_conv = conv(x_conv)\n\n        if self.with_avg_pool:\n            x_conv = self.avg_pool(x_conv)\n\n        x_conv = x_conv.view(x_conv.size(0), -1)\n        bbox_pred = self.fc_reg(x_conv)\n\n        # fc head\n        x_fc = x_cls.view(x_cls.size(0), -1)\n        for fc in self.fc_branch:\n            x_fc = 
self.relu(fc(x_fc))\n\n        cls_score = self.fc_cls(x_fc)\n\n        return cls_score, bbox_pred\n"
  },
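# --- Illustrative usage note (not part of the repository) -------------------
# A small smoke test of the Double-Head forward pass: unlike the heads above
# it takes two inputs, x_cls for the fully connected branch and x_reg for the
# convolutional branch. The hyper-parameters below are illustrative only (the
# real ones live in the Double-Head R-CNN configs) and a working mmdet
# installation is assumed.
import torch
from mmdet.models.roi_heads.bbox_heads.double_bbox_head import \
    DoubleConvFCBBoxHead

head = DoubleConvFCBBoxHead(
    num_convs=1,
    num_fcs=1,
    in_channels=256,
    conv_out_channels=1024,
    fc_out_channels=1024,
    roi_feat_size=7,
    num_classes=4)
head.eval()

roi_feats = torch.rand(8, 256, 7, 7)  # RoI features for 8 proposals
with torch.no_grad():
    cls_score, bbox_pred = head(roi_feats, roi_feats)
print(cls_score.shape)  # torch.Size([8, 5])  -> num_classes + 1 (fc branch)
print(bbox_pred.shape)  # torch.Size([8, 16]) -> 4 * num_classes (conv branch)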
  {
    "path": "mmdet/models/roi_heads/bbox_heads/sabl_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, force_fp32\n\nfrom mmdet.core import build_bbox_coder, multi_apply, multiclass_nms\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.losses import accuracy\n\n\n@HEADS.register_module()\nclass SABLHead(BaseModule):\n    \"\"\"Side-Aware Boundary Localization (SABL) for RoI-Head.\n\n    Side-Aware features are extracted by conv layers\n    with an attention mechanism.\n    Boundary Localization with Bucketing and Bucketing Guided Rescoring\n    are implemented in BucketingBBoxCoder.\n\n    Please refer to https://arxiv.org/abs/1912.04260 for more details.\n\n    Args:\n        cls_in_channels (int): Input channels of cls RoI feature. \\\n            Defaults to 256.\n        reg_in_channels (int): Input channels of reg RoI feature. \\\n            Defaults to 256.\n        roi_feat_size (int): Size of RoI features. Defaults to 7.\n        reg_feat_up_ratio (int): Upsample ratio of reg features. \\\n            Defaults to 2.\n        reg_pre_kernel (int): Kernel of 2D conv layers before \\\n            attention pooling. Defaults to 3.\n        reg_post_kernel (int): Kernel of 1D conv layers after \\\n            attention pooling. Defaults to 3.\n        reg_pre_num (int): Number of pre convs. Defaults to 2.\n        reg_post_num (int): Number of post convs. Defaults to 1.\n        num_classes (int): Number of classes in dataset. Defaults to 80.\n        cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.\n        reg_offset_out_channels (int): Hidden and output channel \\\n            of reg offset branch. Defaults to 256.\n        reg_cls_out_channels (int): Hidden and output channel \\\n            of reg cls branch. Defaults to 256.\n        num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.\n        num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.\n        reg_class_agnostic (bool): Class agnostic regression or not. \\\n            Defaults to True.\n        norm_cfg (dict): Config of norm layers. Defaults to None.\n        bbox_coder (dict): Config of bbox coder. 
Defaults 'BucketingBBoxCoder'.\n        loss_cls (dict): Config of classification loss.\n        loss_bbox_cls (dict): Config of classification loss for bbox branch.\n        loss_bbox_reg (dict): Config of regression loss for bbox branch.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 cls_in_channels=256,\n                 reg_in_channels=256,\n                 roi_feat_size=7,\n                 reg_feat_up_ratio=2,\n                 reg_pre_kernel=3,\n                 reg_post_kernel=3,\n                 reg_pre_num=2,\n                 reg_post_num=1,\n                 cls_out_channels=1024,\n                 reg_offset_out_channels=256,\n                 reg_cls_out_channels=256,\n                 num_cls_fcs=1,\n                 num_reg_fcs=0,\n                 reg_class_agnostic=True,\n                 norm_cfg=None,\n                 bbox_coder=dict(\n                     type='BucketingBBoxCoder',\n                     num_buckets=14,\n                     scale_factor=1.7),\n                 loss_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=False,\n                     loss_weight=1.0),\n                 loss_bbox_cls=dict(\n                     type='CrossEntropyLoss',\n                     use_sigmoid=True,\n                     loss_weight=1.0),\n                 loss_bbox_reg=dict(\n                     type='SmoothL1Loss', beta=0.1, loss_weight=1.0),\n                 init_cfg=None):\n        super(SABLHead, self).__init__(init_cfg)\n        self.cls_in_channels = cls_in_channels\n        self.reg_in_channels = reg_in_channels\n        self.roi_feat_size = roi_feat_size\n        self.reg_feat_up_ratio = int(reg_feat_up_ratio)\n        self.num_buckets = bbox_coder['num_buckets']\n        assert self.reg_feat_up_ratio // 2 >= 1\n        self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio\n        assert self.up_reg_feat_size == bbox_coder['num_buckets']\n        self.reg_pre_kernel = reg_pre_kernel\n        self.reg_post_kernel = reg_post_kernel\n        self.reg_pre_num = reg_pre_num\n        self.reg_post_num = reg_post_num\n        self.num_classes = num_classes\n        self.cls_out_channels = cls_out_channels\n        self.reg_offset_out_channels = reg_offset_out_channels\n        self.reg_cls_out_channels = reg_cls_out_channels\n        self.num_cls_fcs = num_cls_fcs\n        self.num_reg_fcs = num_reg_fcs\n        self.reg_class_agnostic = reg_class_agnostic\n        assert self.reg_class_agnostic\n        self.norm_cfg = norm_cfg\n\n        self.bbox_coder = build_bbox_coder(bbox_coder)\n        self.loss_cls = build_loss(loss_cls)\n        self.loss_bbox_cls = build_loss(loss_bbox_cls)\n        self.loss_bbox_reg = build_loss(loss_bbox_reg)\n\n        self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,\n                                           self.cls_in_channels,\n                                           self.roi_feat_size,\n                                           self.cls_out_channels)\n\n        self.side_num = int(np.ceil(self.num_buckets / 2))\n\n        if self.reg_feat_up_ratio > 1:\n            self.upsample_x = nn.ConvTranspose1d(\n                reg_in_channels,\n                reg_in_channels,\n                self.reg_feat_up_ratio,\n                stride=self.reg_feat_up_ratio)\n            self.upsample_y = nn.ConvTranspose1d(\n                
reg_in_channels,\n                reg_in_channels,\n                self.reg_feat_up_ratio,\n                stride=self.reg_feat_up_ratio)\n\n        self.reg_pre_convs = nn.ModuleList()\n        for i in range(self.reg_pre_num):\n            reg_pre_conv = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=reg_pre_kernel,\n                padding=reg_pre_kernel // 2,\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_pre_convs.append(reg_pre_conv)\n\n        self.reg_post_conv_xs = nn.ModuleList()\n        for i in range(self.reg_post_num):\n            reg_post_conv_x = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=(1, reg_post_kernel),\n                padding=(0, reg_post_kernel // 2),\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_post_conv_xs.append(reg_post_conv_x)\n        self.reg_post_conv_ys = nn.ModuleList()\n        for i in range(self.reg_post_num):\n            reg_post_conv_y = ConvModule(\n                reg_in_channels,\n                reg_in_channels,\n                kernel_size=(reg_post_kernel, 1),\n                padding=(reg_post_kernel // 2, 0),\n                norm_cfg=norm_cfg,\n                act_cfg=dict(type='ReLU'))\n            self.reg_post_conv_ys.append(reg_post_conv_y)\n\n        self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)\n        self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)\n\n        self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,\n                                               self.reg_in_channels, 1,\n                                               self.reg_cls_out_channels)\n        self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,\n                                                  self.reg_in_channels, 1,\n                                                  self.reg_offset_out_channels)\n        self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)\n        self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)\n\n        if init_cfg is None:\n            self.init_cfg = [\n                dict(\n                    type='Xavier',\n                    layer='Linear',\n                    distribution='uniform',\n                    override=[\n                        dict(type='Normal', name='reg_conv_att_x', std=0.01),\n                        dict(type='Normal', name='reg_conv_att_y', std=0.01),\n                        dict(type='Normal', name='fc_reg_cls', std=0.01),\n                        dict(type='Normal', name='fc_cls', std=0.01),\n                        dict(type='Normal', name='fc_reg_offset', std=0.001)\n                    ])\n            ]\n            if self.reg_feat_up_ratio > 1:\n                self.init_cfg += [\n                    dict(\n                        type='Kaiming',\n                        distribution='normal',\n                        override=[\n                            dict(name='upsample_x'),\n                            dict(name='upsample_y')\n                        ])\n                ]\n\n    @property\n    def custom_cls_channels(self):\n        return getattr(self.loss_cls, 'custom_cls_channels', False)\n\n    @property\n    def custom_activation(self):\n        return getattr(self.loss_cls, 
'custom_activation', False)\n\n    @property\n    def custom_accuracy(self):\n        return getattr(self.loss_cls, 'custom_accuracy', False)\n\n    def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,\n                       fc_out_channels):\n        in_channels = in_channels * roi_feat_size * roi_feat_size\n        branch_fcs = nn.ModuleList()\n        for i in range(num_branch_fcs):\n            fc_in_channels = (in_channels if i == 0 else fc_out_channels)\n            branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))\n        return branch_fcs\n\n    def cls_forward(self, cls_x):\n        cls_x = cls_x.view(cls_x.size(0), -1)\n        for fc in self.cls_fcs:\n            cls_x = self.relu(fc(cls_x))\n        cls_score = self.fc_cls(cls_x)\n        return cls_score\n\n    def attention_pool(self, reg_x):\n        \"\"\"Extract direction-specific features fx and fy with attention\n        mechanism.\"\"\"\n        reg_fx = reg_x\n        reg_fy = reg_x\n        reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()\n        reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()\n        reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)\n        reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)\n        reg_fx = (reg_fx * reg_fx_att).sum(dim=2)\n        reg_fy = (reg_fy * reg_fy_att).sum(dim=3)\n        return reg_fx, reg_fy\n\n    def side_aware_feature_extractor(self, reg_x):\n        \"\"\"Refine and extract side-aware features without splitting them.\"\"\"\n        for reg_pre_conv in self.reg_pre_convs:\n            reg_x = reg_pre_conv(reg_x)\n        reg_fx, reg_fy = self.attention_pool(reg_x)\n\n        if self.reg_post_num > 0:\n            reg_fx = reg_fx.unsqueeze(2)\n            reg_fy = reg_fy.unsqueeze(3)\n            for i in range(self.reg_post_num):\n                reg_fx = self.reg_post_conv_xs[i](reg_fx)\n                reg_fy = self.reg_post_conv_ys[i](reg_fy)\n            reg_fx = reg_fx.squeeze(2)\n            reg_fy = reg_fy.squeeze(3)\n        if self.reg_feat_up_ratio > 1:\n            reg_fx = self.relu(self.upsample_x(reg_fx))\n            reg_fy = self.relu(self.upsample_y(reg_fy))\n        reg_fx = torch.transpose(reg_fx, 1, 2)\n        reg_fy = torch.transpose(reg_fy, 1, 2)\n        return reg_fx.contiguous(), reg_fy.contiguous()\n\n    def reg_pred(self, x, offset_fcs, cls_fcs):\n        \"\"\"Predict bucketing estimation (cls_pred) and fine regression (offset\n        pred) with side-aware features.\"\"\"\n        x_offset = x.view(-1, self.reg_in_channels)\n        x_cls = x.view(-1, self.reg_in_channels)\n\n        for fc in offset_fcs:\n            x_offset = self.relu(fc(x_offset))\n        for fc in cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n        offset_pred = self.fc_reg_offset(x_offset)\n        cls_pred = self.fc_reg_cls(x_cls)\n\n        offset_pred = offset_pred.view(x.size(0), -1)\n        cls_pred = cls_pred.view(x.size(0), -1)\n\n        return offset_pred, cls_pred\n\n    def side_aware_split(self, feat):\n        \"\"\"Split side-aware features aligned with orders of bucketing\n        targets.\"\"\"\n        l_end = int(np.ceil(self.up_reg_feat_size / 2))\n        r_start = int(np.floor(self.up_reg_feat_size / 2))\n        feat_fl = feat[:, :l_end]\n        feat_fr = feat[:, r_start:].flip(dims=(1, ))\n        feat_fl = feat_fl.contiguous()\n        feat_fr = feat_fr.contiguous()\n        feat = torch.cat([feat_fl, feat_fr], dim=-1)\n        return feat\n\n    def bbox_pred_split(self, 
bbox_pred, num_proposals_per_img):\n        \"\"\"Split batch bbox prediction back to each image.\"\"\"\n        bucket_cls_preds, bucket_offset_preds = bbox_pred\n        bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)\n        bucket_offset_preds = bucket_offset_preds.split(\n            num_proposals_per_img, 0)\n        bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))\n        return bbox_pred\n\n    def reg_forward(self, reg_x):\n        outs = self.side_aware_feature_extractor(reg_x)\n        edge_offset_preds = []\n        edge_cls_preds = []\n        reg_fx = outs[0]\n        reg_fy = outs[1]\n        offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,\n                                                  self.reg_cls_fcs)\n        offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,\n                                                  self.reg_cls_fcs)\n        offset_pred_x = self.side_aware_split(offset_pred_x)\n        offset_pred_y = self.side_aware_split(offset_pred_y)\n        cls_pred_x = self.side_aware_split(cls_pred_x)\n        cls_pred_y = self.side_aware_split(cls_pred_y)\n        edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)\n        edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)\n\n        return (edge_cls_preds, edge_offset_preds)\n\n    def forward(self, x):\n\n        bbox_pred = self.reg_forward(x)\n        cls_score = self.cls_forward(x)\n\n        return cls_score, bbox_pred\n\n    def get_targets(self, sampling_results, gt_bboxes, gt_labels,\n                    rcnn_train_cfg):\n        pos_proposals = [res.pos_bboxes for res in sampling_results]\n        neg_proposals = [res.neg_bboxes for res in sampling_results]\n        pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]\n        pos_gt_labels = [res.pos_gt_labels for res in sampling_results]\n        cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,\n                                             pos_gt_bboxes, pos_gt_labels,\n                                             rcnn_train_cfg)\n        (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n         bucket_offset_targets, bucket_offset_weights) = cls_reg_targets\n        return (labels, label_weights, (bucket_cls_targets,\n                                        bucket_offset_targets),\n                (bucket_cls_weights, bucket_offset_weights))\n\n    def bucket_target(self,\n                      pos_proposals_list,\n                      neg_proposals_list,\n                      pos_gt_bboxes_list,\n                      pos_gt_labels_list,\n                      rcnn_train_cfg,\n                      concat=True):\n        (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n         bucket_offset_targets, bucket_offset_weights) = multi_apply(\n             self._bucket_target_single,\n             pos_proposals_list,\n             neg_proposals_list,\n             pos_gt_bboxes_list,\n             pos_gt_labels_list,\n             cfg=rcnn_train_cfg)\n\n        if concat:\n            labels = torch.cat(labels, 0)\n            label_weights = torch.cat(label_weights, 0)\n            bucket_cls_targets = torch.cat(bucket_cls_targets, 0)\n            bucket_cls_weights = torch.cat(bucket_cls_weights, 0)\n            bucket_offset_targets = torch.cat(bucket_offset_targets, 0)\n            bucket_offset_weights = torch.cat(bucket_offset_weights, 0)\n        return (labels, label_weights, 
bucket_cls_targets, bucket_cls_weights,\n                bucket_offset_targets, bucket_offset_weights)\n\n    def _bucket_target_single(self, pos_proposals, neg_proposals,\n                              pos_gt_bboxes, pos_gt_labels, cfg):\n        \"\"\"Compute bucketing estimation targets and fine regression targets for\n        a single image.\n\n        Args:\n            pos_proposals (Tensor): positive proposals of a single image,\n                 Shape (n_pos, 4)\n            neg_proposals (Tensor): negative proposals of a single image,\n                 Shape (n_neg, 4).\n            pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals\n                 of a single image, Shape (n_pos, 4).\n            pos_gt_labels (Tensor): gt labels assigned to positive proposals\n                 of a single image, Shape (n_pos, ).\n            cfg (dict): Config of calculating targets\n\n        Returns:\n            tuple:\n\n                - labels (Tensor): Labels in a single image. \\\n                    Shape (n,).\n                - label_weights (Tensor): Label weights in a single image.\\\n                    Shape (n,)\n                - bucket_cls_targets (Tensor): Bucket cls targets in \\\n                    a single image. Shape (n, num_buckets*2).\n                - bucket_cls_weights (Tensor): Bucket cls weights in \\\n                    a single image. Shape (n, num_buckets*2).\n                - bucket_offset_targets (Tensor): Bucket offset targets \\\n                    in a single image. Shape (n, num_buckets*2).\n                - bucket_offset_targets (Tensor): Bucket offset weights \\\n                    in a single image. Shape (n, num_buckets*2).\n        \"\"\"\n        num_pos = pos_proposals.size(0)\n        num_neg = neg_proposals.size(0)\n        num_samples = num_pos + num_neg\n        labels = pos_gt_bboxes.new_full((num_samples, ),\n                                        self.num_classes,\n                                        dtype=torch.long)\n        label_weights = pos_proposals.new_zeros(num_samples)\n        bucket_cls_targets = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_cls_weights = pos_proposals.new_zeros(num_samples,\n                                                     4 * self.side_num)\n        bucket_offset_targets = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        bucket_offset_weights = pos_proposals.new_zeros(\n            num_samples, 4 * self.side_num)\n        if num_pos > 0:\n            labels[:num_pos] = pos_gt_labels\n            label_weights[:num_pos] = 1.0\n            (pos_bucket_offset_targets, pos_bucket_offset_weights,\n             pos_bucket_cls_targets,\n             pos_bucket_cls_weights) = self.bbox_coder.encode(\n                 pos_proposals, pos_gt_bboxes)\n            bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets\n            bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights\n            bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets\n            bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights\n        if num_neg > 0:\n            label_weights[-num_neg:] = 1.0\n        return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,\n                bucket_offset_targets, bucket_offset_weights)\n\n    def loss(self,\n             cls_score,\n             bbox_pred,\n             rois,\n             labels,\n             
label_weights,\n             bbox_targets,\n             bbox_weights,\n             reduction_override=None):\n        losses = dict()\n        if cls_score is not None:\n            avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n            losses['loss_cls'] = self.loss_cls(\n                cls_score,\n                labels,\n                label_weights,\n                avg_factor=avg_factor,\n                reduction_override=reduction_override)\n            losses['acc'] = accuracy(cls_score, labels)\n\n        if bbox_pred is not None:\n            bucket_cls_preds, bucket_offset_preds = bbox_pred\n            bucket_cls_targets, bucket_offset_targets = bbox_targets\n            bucket_cls_weights, bucket_offset_weights = bbox_weights\n            # edge cls\n            bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)\n            bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)\n            bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)\n            losses['loss_bbox_cls'] = self.loss_bbox_cls(\n                bucket_cls_preds,\n                bucket_cls_targets,\n                bucket_cls_weights,\n                avg_factor=bucket_cls_targets.size(0),\n                reduction_override=reduction_override)\n\n            losses['loss_bbox_reg'] = self.loss_bbox_reg(\n                bucket_offset_preds,\n                bucket_offset_targets,\n                bucket_offset_weights,\n                avg_factor=bucket_offset_targets.size(0),\n                reduction_override=reduction_override)\n\n        return losses\n\n    @force_fp32(apply_to=('cls_score', 'bbox_pred'))\n    def get_bboxes(self,\n                   rois,\n                   cls_score,\n                   bbox_pred,\n                   img_shape,\n                   scale_factor,\n                   rescale=False,\n                   cfg=None):\n        if isinstance(cls_score, list):\n            cls_score = sum(cls_score) / float(len(cls_score))\n        scores = F.softmax(cls_score, dim=1) if cls_score is not None else None\n\n        if bbox_pred is not None:\n            bboxes, confidences = self.bbox_coder.decode(\n                rois[:, 1:], bbox_pred, img_shape)\n        else:\n            bboxes = rois[:, 1:].clone()\n            confidences = None\n            if img_shape is not None:\n                bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)\n                bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)\n\n        if rescale and bboxes.size(0) > 0:\n            if isinstance(scale_factor, float):\n                bboxes /= scale_factor\n            else:\n                bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)\n\n        if cfg is None:\n            return bboxes, scores\n        else:\n            det_bboxes, det_labels = multiclass_nms(\n                bboxes,\n                scores,\n                cfg.score_thr,\n                cfg.nms,\n                cfg.max_per_img,\n                score_factors=confidences)\n\n            return det_bboxes, det_labels\n\n    @force_fp32(apply_to=('bbox_preds', ))\n    def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):\n        \"\"\"Refine bboxes during training.\n\n        Args:\n            rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,\n                and bs is the sampled RoIs per image.\n            labels (Tensor): Shape (n*bs, ).\n            bbox_preds (list[Tensor]): Shape [(n*bs, 
num_buckets*2), \\\n                (n*bs, num_buckets*2)].\n            pos_is_gts (list[Tensor]): Flags indicating if each positive bbox\n                is a gt bbox.\n            img_metas (list[dict]): Meta info of each image.\n\n        Returns:\n            list[Tensor]: Refined bboxes of each image in a mini-batch.\n        \"\"\"\n        img_ids = rois[:, 0].long().unique(sorted=True)\n        assert img_ids.numel() == len(img_metas)\n\n        bboxes_list = []\n        for i in range(len(img_metas)):\n            inds = torch.nonzero(\n                rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n            num_rois = inds.numel()\n\n            bboxes_ = rois[inds, 1:]\n            label_ = labels[inds]\n            edge_cls_preds, edge_offset_preds = bbox_preds\n            edge_cls_preds_ = edge_cls_preds[inds]\n            edge_offset_preds_ = edge_offset_preds[inds]\n            bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]\n            img_meta_ = img_metas[i]\n            pos_is_gts_ = pos_is_gts[i]\n\n            bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n                                           img_meta_)\n            # filter gt bboxes\n            pos_keep = 1 - pos_is_gts_\n            keep_inds = pos_is_gts_.new_ones(num_rois)\n            keep_inds[:len(pos_is_gts_)] = pos_keep\n\n            bboxes_list.append(bboxes[keep_inds.type(torch.bool)])\n\n        return bboxes_list\n\n    @force_fp32(apply_to=('bbox_pred', ))\n    def regress_by_class(self, rois, label, bbox_pred, img_meta):\n        \"\"\"Regress the bbox for the predicted class. Used in Cascade R-CNN.\n\n        Args:\n            rois (Tensor): shape (n, 4) or (n, 5)\n            label (Tensor): shape (n, )\n            bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \\\n                (n, num_buckets *2)]\n            img_meta (dict): Image meta info.\n\n        Returns:\n            Tensor: Regressed bboxes, the same shape as input rois.\n        \"\"\"\n        assert rois.size(1) == 4 or rois.size(1) == 5\n\n        if rois.size(1) == 4:\n            new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,\n                                                 img_meta['img_shape'])\n        else:\n            bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,\n                                               img_meta['img_shape'])\n            new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)\n\n        return new_rois\n"
  },
  {
    "path": "mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.builder import HEADS\nfrom .convfc_bbox_head import ConvFCBBoxHead\n\n\n@HEADS.register_module()\nclass SCNetBBoxHead(ConvFCBBoxHead):\n    \"\"\"BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    This inherits ``ConvFCBBoxHead`` with modified forward() function, allow us\n    to get intermediate shared feature.\n    \"\"\"\n\n    def _forward_shared(self, x):\n        \"\"\"Forward function for shared part.\"\"\"\n        if self.num_shared_convs > 0:\n            for conv in self.shared_convs:\n                x = conv(x)\n\n        if self.num_shared_fcs > 0:\n            if self.with_avg_pool:\n                x = self.avg_pool(x)\n\n            x = x.flatten(1)\n\n            for fc in self.shared_fcs:\n                x = self.relu(fc(x))\n\n        return x\n\n    def _forward_cls_reg(self, x):\n        \"\"\"Forward function for classification and regression parts.\"\"\"\n        x_cls = x\n        x_reg = x\n\n        for conv in self.cls_convs:\n            x_cls = conv(x_cls)\n        if x_cls.dim() > 2:\n            if self.with_avg_pool:\n                x_cls = self.avg_pool(x_cls)\n            x_cls = x_cls.flatten(1)\n        for fc in self.cls_fcs:\n            x_cls = self.relu(fc(x_cls))\n\n        for conv in self.reg_convs:\n            x_reg = conv(x_reg)\n        if x_reg.dim() > 2:\n            if self.with_avg_pool:\n                x_reg = self.avg_pool(x_reg)\n            x_reg = x_reg.flatten(1)\n        for fc in self.reg_fcs:\n            x_reg = self.relu(fc(x_reg))\n\n        cls_score = self.fc_cls(x_cls) if self.with_cls else None\n        bbox_pred = self.fc_reg(x_reg) if self.with_reg else None\n\n        return cls_score, bbox_pred\n\n    def forward(self, x, return_shared_feat=False):\n        \"\"\"Forward function.\n\n        Args:\n            x (Tensor): input features\n            return_shared_feat (bool): If True, return cls-reg-shared feature.\n\n        Return:\n            out (tuple[Tensor]): contain ``cls_score`` and ``bbox_pred``,\n                if  ``return_shared_feat`` is True, append ``x_shared`` to the\n                returned tuple.\n        \"\"\"\n        x_shared = self._forward_shared(x)\n        out = self._forward_cls_reg(x_shared)\n\n        if return_shared_feat:\n            out += (x_shared, )\n\n        return out\n"
  },
  {
    "path": "mmdet/models/roi_heads/cascade_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import ModuleList\n\nfrom mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,\n                        build_sampler, merge_aug_bboxes, merge_aug_masks,\n                        multiclass_nms)\nfrom ..builder import HEADS, build_head, build_roi_extractor\nfrom .base_roi_head import BaseRoIHead\nfrom .test_mixins import BBoxTestMixin, MaskTestMixin\n\n\n@HEADS.register_module()\nclass CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n    \"\"\"Cascade roi head including one bbox head and one mask head.\n\n    https://arxiv.org/abs/1712.00726\n    \"\"\"\n\n    def __init__(self,\n                 num_stages,\n                 stage_loss_weights,\n                 bbox_roi_extractor=None,\n                 bbox_head=None,\n                 mask_roi_extractor=None,\n                 mask_head=None,\n                 shared_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        assert bbox_roi_extractor is not None\n        assert bbox_head is not None\n        assert shared_head is None, \\\n            'Shared head is not supported in Cascade RCNN anymore'\n\n        self.num_stages = num_stages\n        self.stage_loss_weights = stage_loss_weights\n        super(CascadeRoIHead, self).__init__(\n            bbox_roi_extractor=bbox_roi_extractor,\n            bbox_head=bbox_head,\n            mask_roi_extractor=mask_roi_extractor,\n            mask_head=mask_head,\n            shared_head=shared_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n\n    def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n        \"\"\"Initialize box head and box roi extractor.\n\n        Args:\n            bbox_roi_extractor (dict): Config of box roi extractor.\n            bbox_head (dict): Config of box in box head.\n        \"\"\"\n        self.bbox_roi_extractor = ModuleList()\n        self.bbox_head = ModuleList()\n        if not isinstance(bbox_roi_extractor, list):\n            bbox_roi_extractor = [\n                bbox_roi_extractor for _ in range(self.num_stages)\n            ]\n        if not isinstance(bbox_head, list):\n            bbox_head = [bbox_head for _ in range(self.num_stages)]\n        assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n        for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n            self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))\n            self.bbox_head.append(build_head(head))\n\n    def init_mask_head(self, mask_roi_extractor, mask_head):\n        \"\"\"Initialize mask head and mask roi extractor.\n\n        Args:\n            mask_roi_extractor (dict): Config of mask roi extractor.\n            mask_head (dict): Config of mask in mask head.\n        \"\"\"\n        self.mask_head = nn.ModuleList()\n        if not isinstance(mask_head, list):\n            mask_head = [mask_head for _ in range(self.num_stages)]\n        assert len(mask_head) == self.num_stages\n        for head in mask_head:\n            self.mask_head.append(build_head(head))\n        if mask_roi_extractor is not None:\n            self.share_roi_extractor = False\n            self.mask_roi_extractor = ModuleList()\n            if not isinstance(mask_roi_extractor, list):\n           
     mask_roi_extractor = [\n                    mask_roi_extractor for _ in range(self.num_stages)\n                ]\n            assert len(mask_roi_extractor) == self.num_stages\n            for roi_extractor in mask_roi_extractor:\n                self.mask_roi_extractor.append(\n                    build_roi_extractor(roi_extractor))\n        else:\n            self.share_roi_extractor = True\n            self.mask_roi_extractor = self.bbox_roi_extractor\n\n    def init_assigner_sampler(self):\n        \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n        self.bbox_assigner = []\n        self.bbox_sampler = []\n        if self.train_cfg is not None:\n            for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n                self.bbox_assigner.append(\n                    build_assigner(rcnn_train_cfg.assigner))\n                self.current_stage = idx\n                self.bbox_sampler.append(\n                    build_sampler(rcnn_train_cfg.sampler, context=self))\n\n    def forward_dummy(self, x, proposals):\n        \"\"\"Dummy forward function.\"\"\"\n        # bbox head\n        outs = ()\n        rois = bbox2roi([proposals])\n        if self.with_bbox:\n            for i in range(self.num_stages):\n                bbox_results = self._bbox_forward(i, x, rois)\n                outs = outs + (bbox_results['cls_score'],\n                               bbox_results['bbox_pred'])\n        # mask heads\n        if self.with_mask:\n            mask_rois = rois[:100]\n            for i in range(self.num_stages):\n                mask_results = self._mask_forward(i, x, mask_rois)\n                outs = outs + (mask_results['mask_pred'], )\n        return outs\n\n    def _bbox_forward(self, stage, x, rois):\n        \"\"\"Box head forward function used in both training and testing.\"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        cls_score, bbox_pred = bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,\n                            gt_labels, rcnn_train_cfg):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n        bbox_results = self._bbox_forward(stage, x, rois)\n        bbox_targets = self.bbox_head[stage].get_targets(\n            sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)\n        loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],\n                                               bbox_results['bbox_pred'], rois,\n                                               *bbox_targets)\n\n        bbox_results.update(\n            loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n        return bbox_results\n\n    def _mask_forward(self, stage, x, rois):\n        \"\"\"Mask head forward function used in both training and testing.\"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n     
   mask_pred = mask_head(mask_feats)\n\n        mask_results = dict(mask_pred=mask_pred)\n        return mask_results\n\n    def _mask_forward_train(self,\n                            stage,\n                            x,\n                            sampling_results,\n                            gt_masks,\n                            rcnn_train_cfg,\n                            bbox_feats=None):\n        \"\"\"Run forward function and calculate loss for mask head in\n        training.\"\"\"\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n        mask_results = self._mask_forward(stage, x, pos_rois)\n\n        mask_targets = self.mask_head[stage].get_targets(\n            sampling_results, gt_masks, rcnn_train_cfg)\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n                                               mask_targets, pos_labels)\n\n        mask_results.update(loss_mask=loss_mask)\n        return mask_results\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None):\n        \"\"\"\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            proposals (list[Tensors]): list of region proposals.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        losses = dict()\n        for i in range(self.num_stages):\n            self.current_stage = i\n            rcnn_train_cfg = self.train_cfg[i]\n            lw = self.stage_loss_weights[i]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            if self.with_bbox or self.with_mask:\n                bbox_assigner = self.bbox_assigner[i]\n                bbox_sampler = self.bbox_sampler[i]\n                num_imgs = len(img_metas)\n                if gt_bboxes_ignore is None:\n                    gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n                for j in range(num_imgs):\n                    assign_result = bbox_assigner.assign(\n                        proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n                        gt_labels[j])\n                    sampling_result = bbox_sampler.sample(\n                        assign_result,\n                        proposal_list[j],\n                        gt_bboxes[j],\n                        gt_labels[j],\n                        
feats=[lvl_feat[j][None] for lvl_feat in x])\n                    sampling_results.append(sampling_result)\n\n            # bbox head forward and loss\n            bbox_results = self._bbox_forward_train(i, x, sampling_results,\n                                                    gt_bboxes, gt_labels,\n                                                    rcnn_train_cfg)\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{i}.{name}'] = (\n                    value * lw if 'loss' in name else value)\n\n            # mask head forward and loss\n            if self.with_mask:\n                mask_results = self._mask_forward_train(\n                    i, x, sampling_results, gt_masks, rcnn_train_cfg,\n                    bbox_results['bbox_feats'])\n                for name, value in mask_results['loss_mask'].items():\n                    losses[f's{i}.{name}'] = (\n                        value * lw if 'loss' in name else value)\n\n            # refine bboxes\n            if i < self.num_stages - 1:\n                pos_is_gts = [res.pos_is_gt for res in sampling_results]\n                # bbox_targets is a tuple\n                roi_labels = bbox_results['bbox_targets'][0]\n                with torch.no_grad():\n                    cls_score = bbox_results['cls_score']\n                    if self.bbox_head[i].custom_activation:\n                        cls_score = self.bbox_head[i].loss_cls.get_activation(\n                            cls_score)\n\n                    # Empty proposal.\n                    if cls_score.numel() == 0:\n                        break\n\n                    roi_labels = torch.where(\n                        roi_labels == self.bbox_head[i].num_classes,\n                        cls_score[:, :-1].argmax(1), roi_labels)\n                    proposal_list = self.bbox_head[i].refine_bboxes(\n                        bbox_results['rois'], roi_labels,\n                        bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n        return losses\n\n    def simple_test(self, x, proposal_list, img_metas, rescale=False):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (batch_size, c, h, w).\n            proposal_list (list(Tensor)): Proposals from rpn head.\n                Each has shape (num_proposals, 5), last dimension\n                5 represent (x1, y1, x2, y2, score).\n            img_metas (list[dict]): Meta information of images.\n            rescale (bool): Whether to rescale the results to\n                the original image. Default: True.\n\n        Returns:\n            list[list[np.ndarray]] or list[tuple]: When no mask branch,\n            it is bbox results of each image and classes with type\n            `list[list[np.ndarray]]`. The outer list\n            corresponds to each image. The inner list\n            corresponds to each class. 
When the model has mask branch,\n            it contains bbox results and mask results.\n            The outer list corresponds to each image, and first element\n            of tuple is bbox results, second element is mask results.\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        num_imgs = len(proposal_list)\n        img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        # \"ms\" in variable names means multi-stage\n        ms_bbox_result = {}\n        ms_segm_result = {}\n        ms_scores = []\n        rcnn_test_cfg = self.test_cfg\n\n        rois = bbox2roi(proposal_list)\n\n        if rois.shape[0] == 0:\n            # There is no proposal in the whole batch\n            bbox_results = [[\n                np.zeros((0, 5), dtype=np.float32)\n                for _ in range(self.bbox_head[-1].num_classes)\n            ]] * num_imgs\n\n            if self.with_mask:\n                mask_classes = self.mask_head[-1].num_classes\n                segm_results = [[[] for _ in range(mask_classes)]\n                                for _ in range(num_imgs)]\n                results = list(zip(bbox_results, segm_results))\n            else:\n                results = bbox_results\n\n            return results\n\n        for i in range(self.num_stages):\n            bbox_results = self._bbox_forward(i, x, rois)\n\n            # split batch bbox prediction back to each image\n            cls_score = bbox_results['cls_score']\n            bbox_pred = bbox_results['bbox_pred']\n            num_proposals_per_img = tuple(\n                len(proposals) for proposals in proposal_list)\n            rois = rois.split(num_proposals_per_img, 0)\n            cls_score = cls_score.split(num_proposals_per_img, 0)\n            if isinstance(bbox_pred, torch.Tensor):\n                bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n            else:\n                bbox_pred = self.bbox_head[i].bbox_pred_split(\n                    bbox_pred, num_proposals_per_img)\n            ms_scores.append(cls_score)\n\n            if i < self.num_stages - 1:\n                if self.bbox_head[i].custom_activation:\n                    cls_score = [\n                        self.bbox_head[i].loss_cls.get_activation(s)\n                        for s in cls_score\n                    ]\n                refine_rois_list = []\n                for j in range(num_imgs):\n                    if rois[j].shape[0] > 0:\n                        bbox_label = cls_score[j][:, :-1].argmax(dim=1)\n                        refined_rois = self.bbox_head[i].regress_by_class(\n                            rois[j], bbox_label, bbox_pred[j], img_metas[j])\n                        refine_rois_list.append(refined_rois)\n                rois = torch.cat(refine_rois_list)\n\n        # average scores of each image by stages\n        cls_score = [\n            sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n            for i in range(num_imgs)\n        ]\n\n        # apply bbox post-processing to each image individually\n        det_bboxes = []\n        det_labels = []\n        for i in range(num_imgs):\n            det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n                rois[i],\n                cls_score[i],\n                bbox_pred[i],\n                img_shapes[i],\n                scale_factors[i],\n           
     rescale=rescale,\n                cfg=rcnn_test_cfg)\n            det_bboxes.append(det_bbox)\n            det_labels.append(det_label)\n\n        bbox_results = [\n            bbox2result(det_bboxes[i], det_labels[i],\n                        self.bbox_head[-1].num_classes)\n            for i in range(num_imgs)\n        ]\n        ms_bbox_result['ensemble'] = bbox_results\n\n        if self.with_mask:\n            if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n                mask_classes = self.mask_head[-1].num_classes\n                segm_results = [[[] for _ in range(mask_classes)]\n                                for _ in range(num_imgs)]\n            else:\n                if rescale and not isinstance(scale_factors[0], float):\n                    scale_factors = [\n                        torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                        for scale_factor in scale_factors\n                    ]\n                _bboxes = [\n                    det_bboxes[i][:, :4] *\n                    scale_factors[i] if rescale else det_bboxes[i][:, :4]\n                    for i in range(len(det_bboxes))\n                ]\n                mask_rois = bbox2roi(_bboxes)\n                num_mask_rois_per_img = tuple(\n                    _bbox.size(0) for _bbox in _bboxes)\n                aug_masks = []\n                for i in range(self.num_stages):\n                    mask_results = self._mask_forward(i, x, mask_rois)\n                    mask_pred = mask_results['mask_pred']\n                    # split batch mask prediction back to each image\n                    mask_pred = mask_pred.split(num_mask_rois_per_img, 0)\n                    aug_masks.append([\n                        m.sigmoid().cpu().detach().numpy() for m in mask_pred\n                    ])\n\n                # apply mask post-processing to each image individually\n                segm_results = []\n                for i in range(num_imgs):\n                    if det_bboxes[i].shape[0] == 0:\n                        segm_results.append(\n                            [[]\n                             for _ in range(self.mask_head[-1].num_classes)])\n                    else:\n                        aug_mask = [mask[i] for mask in aug_masks]\n                        merged_masks = merge_aug_masks(\n                            aug_mask, [[img_metas[i]]] * self.num_stages,\n                            rcnn_test_cfg)\n                        segm_result = self.mask_head[-1].get_seg_masks(\n                            merged_masks, _bboxes[i], det_labels[i],\n                            rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n                            rescale)\n                        segm_results.append(segm_result)\n            ms_segm_result['ensemble'] = segm_results\n\n        if self.with_mask:\n            results = list(\n                zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n        else:\n            results = ms_bbox_result['ensemble']\n\n        return results\n\n    def aug_test(self, features, proposal_list, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        rcnn_test_cfg = self.test_cfg\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta in zip(features, img_metas):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n        
    scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n            flip_direction = img_meta[0]['flip_direction']\n\n            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n                                     scale_factor, flip, flip_direction)\n            # \"ms\" in variable names means multi-stage\n            ms_scores = []\n\n            rois = bbox2roi([proposals])\n\n            if rois.shape[0] == 0:\n                # There is no proposal in the single image\n                aug_bboxes.append(rois.new_zeros(0, 4))\n                aug_scores.append(rois.new_zeros(0, 1))\n                continue\n\n            for i in range(self.num_stages):\n                bbox_results = self._bbox_forward(i, x, rois)\n                ms_scores.append(bbox_results['cls_score'])\n\n                if i < self.num_stages - 1:\n                    cls_score = bbox_results['cls_score']\n                    if self.bbox_head[i].custom_activation:\n                        cls_score = self.bbox_head[i].loss_cls.get_activation(\n                            cls_score)\n                    bbox_label = cls_score[:, :-1].argmax(dim=1)\n                    rois = self.bbox_head[i].regress_by_class(\n                        rois, bbox_label, bbox_results['bbox_pred'],\n                        img_meta[0])\n\n            cls_score = sum(ms_scores) / float(len(ms_scores))\n            bboxes, scores = self.bbox_head[-1].get_bboxes(\n                rois,\n                cls_score,\n                bbox_results['bbox_pred'],\n                img_shape,\n                scale_factor,\n                rescale=False,\n                cfg=None)\n            aug_bboxes.append(bboxes)\n            aug_scores.append(scores)\n\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n                                                rcnn_test_cfg.score_thr,\n                                                rcnn_test_cfg.nms,\n                                                rcnn_test_cfg.max_per_img)\n\n        bbox_result = bbox2result(det_bboxes, det_labels,\n                                  self.bbox_head[-1].num_classes)\n\n        if self.with_mask:\n            if det_bboxes.shape[0] == 0:\n                segm_result = [[]\n                               for _ in range(self.mask_head[-1].num_classes)]\n            else:\n                aug_masks = []\n                aug_img_metas = []\n                for x, img_meta in zip(features, img_metas):\n                    img_shape = img_meta[0]['img_shape']\n                    scale_factor = img_meta[0]['scale_factor']\n                    flip = img_meta[0]['flip']\n                    flip_direction = img_meta[0]['flip_direction']\n                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                           scale_factor, flip, flip_direction)\n                    mask_rois = bbox2roi([_bboxes])\n                    for i in range(self.num_stages):\n                        mask_results = self._mask_forward(i, x, mask_rois)\n                        aug_masks.append(\n                            mask_results['mask_pred'].sigmoid().cpu().numpy())\n                        aug_img_metas.append(img_meta)\n                merged_masks = merge_aug_masks(aug_masks, 
aug_img_metas,\n                                               self.test_cfg)\n\n                ori_shape = img_metas[0][0]['ori_shape']\n                dummy_scale_factor = np.ones(4)\n                segm_result = self.mask_head[-1].get_seg_masks(\n                    merged_masks,\n                    det_bboxes,\n                    det_labels,\n                    rcnn_test_cfg,\n                    ori_shape,\n                    scale_factor=dummy_scale_factor,\n                    rescale=False)\n            return [(bbox_result, segm_result)]\n        else:\n            return [bbox_result]\n\n    def onnx_export(self, x, proposals, img_metas):\n\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        assert proposals.shape[0] == 1, 'Only support one input image ' \\\n                                        'while in exporting to ONNX'\n        # remove the scores\n        rois = proposals[..., :-1]\n        batch_size = rois.shape[0]\n        num_proposals_per_img = rois.shape[1]\n        # Eliminate the batch dimension\n        rois = rois.view(-1, 4)\n\n        # add dummy batch index\n        rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1)\n\n        max_shape = img_metas[0]['img_shape_for_onnx']\n        ms_scores = []\n        rcnn_test_cfg = self.test_cfg\n\n        for i in range(self.num_stages):\n            bbox_results = self._bbox_forward(i, x, rois)\n\n            cls_score = bbox_results['cls_score']\n            bbox_pred = bbox_results['bbox_pred']\n            # Recover the batch dimension\n            rois = rois.reshape(batch_size, num_proposals_per_img,\n                                rois.size(-1))\n            cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n                                          cls_score.size(-1))\n            bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)\n            ms_scores.append(cls_score)\n            if i < self.num_stages - 1:\n                assert self.bbox_head[i].reg_class_agnostic\n                new_rois = self.bbox_head[i].bbox_coder.decode(\n                    rois[..., 1:], bbox_pred, max_shape=max_shape)\n                rois = new_rois.reshape(-1, new_rois.shape[-1])\n                # add dummy batch index\n                rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois],\n                                 dim=-1)\n\n        cls_score = sum(ms_scores) / float(len(ms_scores))\n        bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)\n        rois = rois.reshape(batch_size, num_proposals_per_img, -1)\n        det_bboxes, det_labels = self.bbox_head[-1].onnx_export(\n            rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg)\n\n        if not self.with_mask:\n            return det_bboxes, det_labels\n        else:\n            batch_index = torch.arange(\n                det_bboxes.size(0),\n                device=det_bboxes.device).float().view(-1, 1, 1).expand(\n                    det_bboxes.size(0), det_bboxes.size(1), 1)\n            rois = det_bboxes[..., :4]\n            mask_rois = torch.cat([batch_index, rois], dim=-1)\n            mask_rois = mask_rois.view(-1, 5)\n            aug_masks = []\n            for i in range(self.num_stages):\n                mask_results = self._mask_forward(i, x, mask_rois)\n                mask_pred = mask_results['mask_pred']\n                aug_masks.append(mask_pred)\n            max_shape = img_metas[0]['img_shape_for_onnx']\n            # calculate the 
mean of masks from several stage\n            mask_pred = sum(aug_masks) / len(aug_masks)\n            segm_results = self.mask_head[-1].onnx_export(\n                mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1),\n                self.test_cfg, max_shape)\n            segm_results = segm_results.reshape(batch_size,\n                                                det_bboxes.shape[1],\n                                                max_shape[0], max_shape[1])\n            return det_bboxes, det_labels, segm_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/double_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import HEADS\nfrom .standard_roi_head import StandardRoIHead\n\n\n@HEADS.register_module()\nclass DoubleHeadRoIHead(StandardRoIHead):\n    \"\"\"RoI head for Double Head RCNN.\n\n    https://arxiv.org/abs/1904.06493\n    \"\"\"\n\n    def __init__(self, reg_roi_scale_factor, **kwargs):\n        super(DoubleHeadRoIHead, self).__init__(**kwargs)\n        self.reg_roi_scale_factor = reg_roi_scale_factor\n\n    def _bbox_forward(self, x, rois):\n        \"\"\"Box head forward function used in both training and testing time.\"\"\"\n        bbox_cls_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        bbox_reg_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs],\n            rois,\n            roi_scale_factor=self.reg_roi_scale_factor)\n        if self.with_shared_head:\n            bbox_cls_feats = self.shared_head(bbox_cls_feats)\n            bbox_reg_feats = self.shared_head(bbox_reg_feats)\n        cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score,\n            bbox_pred=bbox_pred,\n            bbox_feats=bbox_cls_feats)\n        return bbox_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/dynamic_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.core import bbox2roi\nfrom mmdet.models.losses import SmoothL1Loss\nfrom ..builder import HEADS\nfrom .standard_roi_head import StandardRoIHead\n\nEPS = 1e-15\n\n\n@HEADS.register_module()\nclass DynamicRoIHead(StandardRoIHead):\n    \"\"\"RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_.\"\"\"\n\n    def __init__(self, **kwargs):\n        super(DynamicRoIHead, self).__init__(**kwargs)\n        assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)\n        # the IoU history of the past `update_iter_interval` iterations\n        self.iou_history = []\n        # the beta history of the past `update_iter_interval` iterations\n        self.beta_history = []\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None):\n        \"\"\"Forward function for training.\n\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n            proposals (list[Tensors]): list of region proposals.\n\n            gt_bboxes (list[Tensor]): each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n\n            gt_labels (list[Tensor]): class indices corresponding to each box\n\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        # assign gts and sample proposals\n        if self.with_bbox or self.with_mask:\n            num_imgs = len(img_metas)\n            if gt_bboxes_ignore is None:\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n            sampling_results = []\n            cur_iou = []\n            for i in range(num_imgs):\n                assign_result = self.bbox_assigner.assign(\n                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n                    gt_labels[i])\n                sampling_result = self.bbox_sampler.sample(\n                    assign_result,\n                    proposal_list[i],\n                    gt_bboxes[i],\n                    gt_labels[i],\n                    feats=[lvl_feat[i][None] for lvl_feat in x])\n                # record the `iou_topk`-th largest IoU in an image\n                iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,\n                               len(assign_result.max_overlaps))\n                ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)\n                cur_iou.append(ious[-1].item())\n                sampling_results.append(sampling_result)\n            # average the current IoUs over images\n            cur_iou = np.mean(cur_iou)\n            self.iou_history.append(cur_iou)\n\n   
     losses = dict()\n        # bbox head forward and loss\n        if self.with_bbox:\n            bbox_results = self._bbox_forward_train(x, sampling_results,\n                                                    gt_bboxes, gt_labels,\n                                                    img_metas)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self._mask_forward_train(x, sampling_results,\n                                                    bbox_results['bbox_feats'],\n                                                    gt_masks, img_metas)\n            losses.update(mask_results['loss_mask'])\n\n        # update IoU threshold and SmoothL1 beta\n        update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval\n        if len(self.iou_history) % update_iter_interval == 0:\n            new_iou_thr, new_beta = self.update_hyperparameters()\n\n        return losses\n\n    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,\n                            img_metas):\n        num_imgs = len(img_metas)\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n\n        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n                                                  gt_labels, self.train_cfg)\n        # record the `beta_topk`-th smallest target\n        # `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets\n        # and bbox_weights, respectively\n        pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)\n        num_pos = len(pos_inds)\n        cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)\n        beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,\n                        num_pos)\n        cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()\n        self.beta_history.append(cur_target)\n        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n                                        bbox_results['bbox_pred'], rois,\n                                        *bbox_targets)\n\n        bbox_results.update(loss_bbox=loss_bbox)\n        return bbox_results\n\n    def update_hyperparameters(self):\n        \"\"\"Update hyperparameters like IoU thresholds for assigner and beta for\n        SmoothL1 loss based on the training statistics.\n\n        Returns:\n            tuple[float]: the updated ``iou_thr`` and ``beta``.\n        \"\"\"\n        new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,\n                          np.mean(self.iou_history))\n        self.iou_history = []\n        self.bbox_assigner.pos_iou_thr = new_iou_thr\n        self.bbox_assigner.neg_iou_thr = new_iou_thr\n        self.bbox_assigner.min_pos_iou = new_iou_thr\n        if (np.median(self.beta_history) < EPS):\n            # avoid 0 or too small value for new_beta\n            new_beta = self.bbox_head.loss_bbox.beta\n        else:\n            new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,\n                           np.median(self.beta_history))\n        self.beta_history = []\n        self.bbox_head.loss_bbox.beta = new_beta\n        return new_iou_thr, new_beta\n"
  },
  {
    "path": "mmdet/models/roi_heads/grid_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.core import bbox2result, bbox2roi\nfrom ..builder import HEADS, build_head, build_roi_extractor\nfrom .standard_roi_head import StandardRoIHead\n\n\n@HEADS.register_module()\nclass GridRoIHead(StandardRoIHead):\n    \"\"\"Grid roi head for Grid R-CNN.\n\n    https://arxiv.org/abs/1811.12030\n    \"\"\"\n\n    def __init__(self, grid_roi_extractor, grid_head, **kwargs):\n        assert grid_head is not None\n        super(GridRoIHead, self).__init__(**kwargs)\n        if grid_roi_extractor is not None:\n            self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)\n            self.share_roi_extractor = False\n        else:\n            self.share_roi_extractor = True\n            self.grid_roi_extractor = self.bbox_roi_extractor\n        self.grid_head = build_head(grid_head)\n\n    def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):\n        \"\"\"Ramdom jitter positive proposals for training.\"\"\"\n        for sampling_result, img_meta in zip(sampling_results, img_metas):\n            bboxes = sampling_result.pos_bboxes\n            random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(\n                -amplitude, amplitude)\n            # before jittering\n            cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2\n            wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()\n            # after jittering\n            new_cxcy = cxcy + wh * random_offsets[:, :2]\n            new_wh = wh * (1 + random_offsets[:, 2:])\n            # xywh to xyxy\n            new_x1y1 = (new_cxcy - new_wh / 2)\n            new_x2y2 = (new_cxcy + new_wh / 2)\n            new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)\n            # clip bboxes\n            max_shape = img_meta['img_shape']\n            if max_shape is not None:\n                new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)\n                new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)\n\n            sampling_result.pos_bboxes = new_bboxes\n        return sampling_results\n\n    def forward_dummy(self, x, proposals):\n        \"\"\"Dummy forward function.\"\"\"\n        # bbox head\n        outs = ()\n        rois = bbox2roi([proposals])\n        if self.with_bbox:\n            bbox_results = self._bbox_forward(x, rois)\n            outs = outs + (bbox_results['cls_score'],\n                           bbox_results['bbox_pred'])\n\n        # grid head\n        grid_rois = rois[:100]\n        grid_feats = self.grid_roi_extractor(\n            x[:self.grid_roi_extractor.num_inputs], grid_rois)\n        if self.with_shared_head:\n            grid_feats = self.shared_head(grid_feats)\n        grid_pred = self.grid_head(grid_feats)\n        outs = outs + (grid_pred, )\n\n        # mask head\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_results = self._mask_forward(x, mask_rois)\n            outs = outs + (mask_results['mask_pred'], )\n        return outs\n\n    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,\n                            img_metas):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        bbox_results = super(GridRoIHead,\n                             self)._bbox_forward_train(x, sampling_results,\n                                                       gt_bboxes, gt_labels,\n                                                       img_metas)\n\n        # Grid head 
forward and loss\n        sampling_results = self._random_jitter(sampling_results, img_metas)\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n\n        # GN in head does not support zero shape input\n        if pos_rois.shape[0] == 0:\n            return bbox_results\n\n        grid_feats = self.grid_roi_extractor(\n            x[:self.grid_roi_extractor.num_inputs], pos_rois)\n        if self.with_shared_head:\n            grid_feats = self.shared_head(grid_feats)\n        # Accelerate training\n        max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)\n        sample_idx = torch.randperm(\n            grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid\n                                      )]\n        grid_feats = grid_feats[sample_idx]\n\n        grid_pred = self.grid_head(grid_feats)\n\n        grid_targets = self.grid_head.get_targets(sampling_results,\n                                                  self.train_cfg)\n        grid_targets = grid_targets[sample_idx]\n\n        loss_grid = self.grid_head.loss(grid_pred, grid_targets)\n\n        bbox_results['loss_bbox'].update(loss_grid)\n        return bbox_results\n\n    def simple_test(self,\n                    x,\n                    proposal_list,\n                    img_metas,\n                    proposals=None,\n                    rescale=False):\n        \"\"\"Test without augmentation.\"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n\n        det_bboxes, det_labels = self.simple_test_bboxes(\n            x, img_metas, proposal_list, self.test_cfg, rescale=False)\n        # pack rois into bboxes\n        grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes])\n        if grid_rois.shape[0] != 0:\n            grid_feats = self.grid_roi_extractor(\n                x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)\n            self.grid_head.test_mode = True\n            grid_pred = self.grid_head(grid_feats)\n            # split batch grid head prediction back to each image\n            num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes)\n            grid_pred = {\n                k: v.split(num_roi_per_img, 0)\n                for k, v in grid_pred.items()\n            }\n\n            # apply bbox post-processing to each image individually\n            bbox_results = []\n            num_imgs = len(det_bboxes)\n            for i in range(num_imgs):\n                if det_bboxes[i].shape[0] == 0:\n                    bbox_results.append([\n                        np.zeros((0, 5), dtype=np.float32)\n                        for _ in range(self.bbox_head.num_classes)\n                    ])\n                else:\n                    det_bbox = self.grid_head.get_bboxes(\n                        det_bboxes[i], grid_pred['fused'][i], [img_metas[i]])\n                    if rescale:\n                        det_bbox[:, :4] /= img_metas[i]['scale_factor']\n                    bbox_results.append(\n                        bbox2result(det_bbox, det_labels[i],\n                                    self.bbox_head.num_classes))\n        else:\n            bbox_results = [[\n                np.zeros((0, 5), dtype=np.float32)\n                for _ in range(self.bbox_head.num_classes)\n            ] for _ in range(len(det_bboxes))]\n\n        if not self.with_mask:\n            return bbox_results\n        else:\n            segm_results = self.simple_test_mask(\n                x, img_metas, det_bboxes, det_labels, 
rescale=rescale)\n            return list(zip(bbox_results, segm_results))\n"
  },
  {
    "path": "mmdet/models/roi_heads/htc_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,\n                        merge_aug_masks, multiclass_nms)\nfrom ..builder import HEADS, build_head, build_roi_extractor\nfrom ..utils.brick_wrappers import adaptive_avg_pool2d\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@HEADS.register_module()\nclass HybridTaskCascadeRoIHead(CascadeRoIHead):\n    \"\"\"Hybrid task cascade roi head including one bbox head and one mask head.\n\n    https://arxiv.org/abs/1901.07518\n    \"\"\"\n\n    def __init__(self,\n                 num_stages,\n                 stage_loss_weights,\n                 semantic_roi_extractor=None,\n                 semantic_head=None,\n                 semantic_fusion=('bbox', 'mask'),\n                 interleaved=True,\n                 mask_info_flow=True,\n                 **kwargs):\n        super(HybridTaskCascadeRoIHead,\n              self).__init__(num_stages, stage_loss_weights, **kwargs)\n        assert self.with_bbox\n        assert not self.with_shared_head  # shared head is not supported\n\n        if semantic_head is not None:\n            self.semantic_roi_extractor = build_roi_extractor(\n                semantic_roi_extractor)\n            self.semantic_head = build_head(semantic_head)\n\n        self.semantic_fusion = semantic_fusion\n        self.interleaved = interleaved\n        self.mask_info_flow = mask_info_flow\n\n    @property\n    def with_semantic(self):\n        \"\"\"bool: whether the head has semantic head\"\"\"\n        if hasattr(self, 'semantic_head') and self.semantic_head is not None:\n            return True\n        else:\n            return False\n\n    def forward_dummy(self, x, proposals):\n        \"\"\"Dummy forward function.\"\"\"\n        outs = ()\n        # semantic head\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n        # bbox heads\n        rois = bbox2roi([proposals])\n        for i in range(self.num_stages):\n            bbox_results = self._bbox_forward(\n                i, x, rois, semantic_feat=semantic_feat)\n            outs = outs + (bbox_results['cls_score'],\n                           bbox_results['bbox_pred'])\n        # mask heads\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_roi_extractor = self.mask_roi_extractor[-1]\n            mask_feats = mask_roi_extractor(\n                x[:len(mask_roi_extractor.featmap_strides)], mask_rois)\n            if self.with_semantic and 'mask' in self.semantic_fusion:\n                mask_semantic_feat = self.semantic_roi_extractor(\n                    [semantic_feat], mask_rois)\n                mask_feats = mask_feats + mask_semantic_feat\n            last_feat = None\n            for i in range(self.num_stages):\n                mask_head = self.mask_head[i]\n                if self.mask_info_flow:\n                    mask_pred, last_feat = mask_head(mask_feats, last_feat)\n                else:\n                    mask_pred = mask_head(mask_feats)\n                outs = outs + (mask_pred, )\n        return outs\n\n    def _bbox_forward_train(self,\n                            stage,\n                            x,\n                            sampling_results,\n                            gt_bboxes,\n                            gt_labels,\n                            
rcnn_train_cfg,\n                            semantic_feat=None):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        bbox_head = self.bbox_head[stage]\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n        bbox_results = self._bbox_forward(\n            stage, x, rois, semantic_feat=semantic_feat)\n\n        bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,\n                                             gt_labels, rcnn_train_cfg)\n        loss_bbox = bbox_head.loss(bbox_results['cls_score'],\n                                   bbox_results['bbox_pred'], rois,\n                                   *bbox_targets)\n\n        bbox_results.update(\n            loss_bbox=loss_bbox,\n            rois=rois,\n            bbox_targets=bbox_targets,\n        )\n        return bbox_results\n\n    def _mask_forward_train(self,\n                            stage,\n                            x,\n                            sampling_results,\n                            gt_masks,\n                            rcnn_train_cfg,\n                            semantic_feat=None):\n        \"\"\"Run forward function and calculate loss for mask head in\n        training.\"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        pos_rois)\n\n        # semantic feature fusion\n        # element-wise sum for original features and pooled semantic features\n        if self.with_semantic and 'mask' in self.semantic_fusion:\n            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             pos_rois)\n            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:\n                mask_semantic_feat = F.adaptive_avg_pool2d(\n                    mask_semantic_feat, mask_feats.shape[-2:])\n            mask_feats = mask_feats + mask_semantic_feat\n\n        # mask information flow\n        # forward all previous mask heads to obtain last_feat, and fuse it\n        # with the normal mask feature\n        if self.mask_info_flow:\n            last_feat = None\n            for i in range(stage):\n                last_feat = self.mask_head[i](\n                    mask_feats, last_feat, return_logits=False)\n            mask_pred = mask_head(mask_feats, last_feat, return_feat=False)\n        else:\n            mask_pred = mask_head(mask_feats, return_feat=False)\n\n        mask_targets = mask_head.get_targets(sampling_results, gt_masks,\n                                             rcnn_train_cfg)\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)\n\n        mask_results = dict(loss_mask=loss_mask)\n        return mask_results\n\n    def _bbox_forward(self, stage, x, rois, semantic_feat=None):\n        \"\"\"Box head forward function used in both training and testing.\"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(\n            x[:len(bbox_roi_extractor.featmap_strides)], rois)\n        if self.with_semantic and 'bbox' in self.semantic_fusion:\n            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n        
                                                     rois)\n            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:\n                bbox_semantic_feat = adaptive_avg_pool2d(\n                    bbox_semantic_feat, bbox_feats.shape[-2:])\n            bbox_feats = bbox_feats + bbox_semantic_feat\n        cls_score, bbox_pred = bbox_head(bbox_feats)\n\n        bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)\n        return bbox_results\n\n    def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):\n        \"\"\"Mask head forward function for testing.\"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_rois = bbox2roi([bboxes])\n        mask_feats = mask_roi_extractor(\n            x[:len(mask_roi_extractor.featmap_strides)], mask_rois)\n        if self.with_semantic and 'mask' in self.semantic_fusion:\n            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             mask_rois)\n            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:\n                mask_semantic_feat = F.adaptive_avg_pool2d(\n                    mask_semantic_feat, mask_feats.shape[-2:])\n            mask_feats = mask_feats + mask_semantic_feat\n        if self.mask_info_flow:\n            last_feat = None\n            last_pred = None\n            for i in range(stage):\n                mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)\n                if last_pred is not None:\n                    mask_pred = mask_pred + last_pred\n                last_pred = mask_pred\n            mask_pred = mask_head(mask_feats, last_feat, return_feat=False)\n            if last_pred is not None:\n                mask_pred = mask_pred + last_pred\n        else:\n            mask_pred = mask_head(mask_feats)\n        return mask_pred\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      gt_semantic_seg=None):\n        \"\"\"\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n\n            proposal_list (list[Tensors]): list of region proposals.\n\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n\n            gt_labels (list[Tensor]): class indices corresponding to each box\n\n            gt_bboxes_ignore (None, list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n\n            gt_masks (None, Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks\n                used if the architecture supports semantic segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss 
components\n        \"\"\"\n        # semantic segmentation part\n        # 2 outputs: segmentation prediction and embedded features\n        losses = dict()\n        if self.with_semantic:\n            semantic_pred, semantic_feat = self.semantic_head(x)\n            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)\n            losses['loss_semantic_seg'] = loss_seg\n        else:\n            semantic_feat = None\n\n        for i in range(self.num_stages):\n            self.current_stage = i\n            rcnn_train_cfg = self.train_cfg[i]\n            lw = self.stage_loss_weights[i]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            bbox_assigner = self.bbox_assigner[i]\n            bbox_sampler = self.bbox_sampler[i]\n            num_imgs = len(img_metas)\n            if gt_bboxes_ignore is None:\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n            for j in range(num_imgs):\n                assign_result = bbox_assigner.assign(proposal_list[j],\n                                                     gt_bboxes[j],\n                                                     gt_bboxes_ignore[j],\n                                                     gt_labels[j])\n                sampling_result = bbox_sampler.sample(\n                    assign_result,\n                    proposal_list[j],\n                    gt_bboxes[j],\n                    gt_labels[j],\n                    feats=[lvl_feat[j][None] for lvl_feat in x])\n                sampling_results.append(sampling_result)\n\n            # bbox head forward and loss\n            bbox_results = \\\n                self._bbox_forward_train(\n                    i, x, sampling_results, gt_bboxes, gt_labels,\n                    rcnn_train_cfg, semantic_feat)\n            roi_labels = bbox_results['bbox_targets'][0]\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{i}.{name}'] = (\n                    value * lw if 'loss' in name else value)\n\n            # mask head forward and loss\n            if self.with_mask:\n                # interleaved execution: use regressed bboxes by the box branch\n                # to train the mask branch\n                if self.interleaved:\n                    pos_is_gts = [res.pos_is_gt for res in sampling_results]\n                    with torch.no_grad():\n                        proposal_list = self.bbox_head[i].refine_bboxes(\n                            bbox_results['rois'], roi_labels,\n                            bbox_results['bbox_pred'], pos_is_gts, img_metas)\n                        # re-assign and sample 512 RoIs from 512 RoIs\n                        sampling_results = []\n                        for j in range(num_imgs):\n                            assign_result = bbox_assigner.assign(\n                                proposal_list[j], gt_bboxes[j],\n                                gt_bboxes_ignore[j], gt_labels[j])\n                            sampling_result = bbox_sampler.sample(\n                                assign_result,\n                                proposal_list[j],\n                                gt_bboxes[j],\n                                gt_labels[j],\n                                feats=[lvl_feat[j][None] for lvl_feat in x])\n                            sampling_results.append(sampling_result)\n                mask_results = self._mask_forward_train(\n                    i, x, sampling_results, gt_masks, rcnn_train_cfg,\n         
           semantic_feat)\n                for name, value in mask_results['loss_mask'].items():\n                    losses[f's{i}.{name}'] = (\n                        value * lw if 'loss' in name else value)\n\n            # refine bboxes (same as Cascade R-CNN)\n            if i < self.num_stages - 1 and not self.interleaved:\n                pos_is_gts = [res.pos_is_gt for res in sampling_results]\n                with torch.no_grad():\n                    proposal_list = self.bbox_head[i].refine_bboxes(\n                        bbox_results['rois'], roi_labels,\n                        bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n        return losses\n\n    def simple_test(self, x, proposal_list, img_metas, rescale=False):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (batch_size, c, h, w).\n            proposal_list (list(Tensor)): Proposals from rpn head.\n                Each has shape (num_proposals, 5), last dimension\n                5 represent (x1, y1, x2, y2, score).\n            img_metas (list[dict]): Meta information of images.\n            rescale (bool): Whether to rescale the results to\n                the original image. Default: True.\n\n        Returns:\n            list[list[np.ndarray]] or list[tuple]: When no mask branch,\n            it is bbox results of each image and classes with type\n            `list[list[np.ndarray]]`. The outer list\n            corresponds to each image. The inner list\n            corresponds to each class. When the model has mask branch,\n            it contains bbox results and mask results.\n            The outer list corresponds to each image, and first element\n            of tuple is bbox results, second element is mask results.\n        \"\"\"\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        num_imgs = len(proposal_list)\n        img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        # \"ms\" in variable names means multi-stage\n        ms_bbox_result = {}\n        ms_segm_result = {}\n        ms_scores = []\n        rcnn_test_cfg = self.test_cfg\n\n        rois = bbox2roi(proposal_list)\n\n        if rois.shape[0] == 0:\n            # There is no proposal in the whole batch\n            bbox_results = [[\n                np.zeros((0, 5), dtype=np.float32)\n                for _ in range(self.bbox_head[-1].num_classes)\n            ]] * num_imgs\n\n            if self.with_mask:\n                mask_classes = self.mask_head[-1].num_classes\n                segm_results = [[[] for _ in range(mask_classes)]\n                                for _ in range(num_imgs)]\n                results = list(zip(bbox_results, segm_results))\n            else:\n                results = bbox_results\n\n            return results\n\n        for i in range(self.num_stages):\n            bbox_head = self.bbox_head[i]\n            bbox_results = self._bbox_forward(\n                i, x, rois, semantic_feat=semantic_feat)\n            # split batch bbox prediction back to each image\n            cls_score = bbox_results['cls_score']\n            bbox_pred = bbox_results['bbox_pred']\n            num_proposals_per_img = tuple(len(p) for p in proposal_list)\n            rois = 
rois.split(num_proposals_per_img, 0)\n            cls_score = cls_score.split(num_proposals_per_img, 0)\n            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n            ms_scores.append(cls_score)\n\n            if i < self.num_stages - 1:\n                refine_rois_list = []\n                for j in range(num_imgs):\n                    if rois[j].shape[0] > 0:\n                        bbox_label = cls_score[j][:, :-1].argmax(dim=1)\n                        refine_rois = bbox_head.regress_by_class(\n                            rois[j], bbox_label, bbox_pred[j], img_metas[j])\n                        refine_rois_list.append(refine_rois)\n                rois = torch.cat(refine_rois_list)\n\n        # average scores of each image by stages\n        cls_score = [\n            sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n            for i in range(num_imgs)\n        ]\n\n        # apply bbox post-processing to each image individually\n        det_bboxes = []\n        det_labels = []\n        for i in range(num_imgs):\n            det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n                rois[i],\n                cls_score[i],\n                bbox_pred[i],\n                img_shapes[i],\n                scale_factors[i],\n                rescale=rescale,\n                cfg=rcnn_test_cfg)\n            det_bboxes.append(det_bbox)\n            det_labels.append(det_label)\n        bbox_result = [\n            bbox2result(det_bboxes[i], det_labels[i],\n                        self.bbox_head[-1].num_classes)\n            for i in range(num_imgs)\n        ]\n        ms_bbox_result['ensemble'] = bbox_result\n\n        if self.with_mask:\n            if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n                mask_classes = self.mask_head[-1].num_classes\n                segm_results = [[[] for _ in range(mask_classes)]\n                                for _ in range(num_imgs)]\n            else:\n                if rescale and not isinstance(scale_factors[0], float):\n                    scale_factors = [\n                        torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                        for scale_factor in scale_factors\n                    ]\n                _bboxes = [\n                    det_bboxes[i][:, :4] *\n                    scale_factors[i] if rescale else det_bboxes[i]\n                    for i in range(num_imgs)\n                ]\n                mask_rois = bbox2roi(_bboxes)\n                aug_masks = []\n                mask_roi_extractor = self.mask_roi_extractor[-1]\n                mask_feats = mask_roi_extractor(\n                    x[:len(mask_roi_extractor.featmap_strides)], mask_rois)\n                if self.with_semantic and 'mask' in self.semantic_fusion:\n                    mask_semantic_feat = self.semantic_roi_extractor(\n                        [semantic_feat], mask_rois)\n                    mask_feats = mask_feats + mask_semantic_feat\n                last_feat = None\n\n                num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)\n                for i in range(self.num_stages):\n                    mask_head = self.mask_head[i]\n                    if self.mask_info_flow:\n                        mask_pred, last_feat = mask_head(mask_feats, last_feat)\n                    else:\n                        mask_pred = mask_head(mask_feats)\n\n                    # split batch mask prediction back to each image\n                    mask_pred = 
mask_pred.split(num_bbox_per_img, 0)\n                    aug_masks.append(\n                        [mask.sigmoid().cpu().numpy() for mask in mask_pred])\n\n                # apply mask post-processing to each image individually\n                segm_results = []\n                for i in range(num_imgs):\n                    if det_bboxes[i].shape[0] == 0:\n                        segm_results.append(\n                            [[]\n                             for _ in range(self.mask_head[-1].num_classes)])\n                    else:\n                        aug_mask = [mask[i] for mask in aug_masks]\n                        merged_mask = merge_aug_masks(\n                            aug_mask, [[img_metas[i]]] * self.num_stages,\n                            rcnn_test_cfg)\n                        segm_result = self.mask_head[-1].get_seg_masks(\n                            merged_mask, _bboxes[i], det_labels[i],\n                            rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n                            rescale)\n                        segm_results.append(segm_result)\n            ms_segm_result['ensemble'] = segm_results\n\n        if self.with_mask:\n            results = list(\n                zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n        else:\n            results = ms_bbox_result['ensemble']\n\n        return results\n\n    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        if self.with_semantic:\n            semantic_feats = [\n                self.semantic_head(feat)[1] for feat in img_feats\n            ]\n        else:\n            semantic_feats = [None] * len(img_metas)\n\n        rcnn_test_cfg = self.test_cfg\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n            scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n            flip_direction = img_meta[0]['flip_direction']\n\n            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n                                     scale_factor, flip, flip_direction)\n            # \"ms\" in variable names means multi-stage\n            ms_scores = []\n\n            rois = bbox2roi([proposals])\n\n            if rois.shape[0] == 0:\n                # There is no proposal in the single image\n                aug_bboxes.append(rois.new_zeros(0, 4))\n                aug_scores.append(rois.new_zeros(0, 1))\n                continue\n\n            for i in range(self.num_stages):\n                bbox_head = self.bbox_head[i]\n                bbox_results = self._bbox_forward(\n                    i, x, rois, semantic_feat=semantic)\n                ms_scores.append(bbox_results['cls_score'])\n\n                if i < self.num_stages - 1:\n                    bbox_label = bbox_results['cls_score'].argmax(dim=1)\n                    rois = bbox_head.regress_by_class(\n                        rois, bbox_label, bbox_results['bbox_pred'],\n                        img_meta[0])\n\n            cls_score = sum(ms_scores) / float(len(ms_scores))\n            bboxes, scores = self.bbox_head[-1].get_bboxes(\n                rois,\n                cls_score,\n                bbox_results['bbox_pred'],\n   
             img_shape,\n                scale_factor,\n                rescale=False,\n                cfg=None)\n            aug_bboxes.append(bboxes)\n            aug_scores.append(scores)\n\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n                                                rcnn_test_cfg.score_thr,\n                                                rcnn_test_cfg.nms,\n                                                rcnn_test_cfg.max_per_img)\n\n        bbox_result = bbox2result(det_bboxes, det_labels,\n                                  self.bbox_head[-1].num_classes)\n\n        if self.with_mask:\n            if det_bboxes.shape[0] == 0:\n                segm_result = [[]\n                               for _ in range(self.mask_head[-1].num_classes)]\n            else:\n                aug_masks = []\n                aug_img_metas = []\n                for x, img_meta, semantic in zip(img_feats, img_metas,\n                                                 semantic_feats):\n                    img_shape = img_meta[0]['img_shape']\n                    scale_factor = img_meta[0]['scale_factor']\n                    flip = img_meta[0]['flip']\n                    flip_direction = img_meta[0]['flip_direction']\n                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                           scale_factor, flip, flip_direction)\n                    mask_rois = bbox2roi([_bboxes])\n                    mask_feats = self.mask_roi_extractor[-1](\n                        x[:len(self.mask_roi_extractor[-1].featmap_strides)],\n                        mask_rois)\n                    if self.with_semantic:\n                        semantic_feat = semantic\n                        mask_semantic_feat = self.semantic_roi_extractor(\n                            [semantic_feat], mask_rois)\n                        if mask_semantic_feat.shape[-2:] != mask_feats.shape[\n                                -2:]:\n                            mask_semantic_feat = F.adaptive_avg_pool2d(\n                                mask_semantic_feat, mask_feats.shape[-2:])\n                        mask_feats = mask_feats + mask_semantic_feat\n                    last_feat = None\n                    for i in range(self.num_stages):\n                        mask_head = self.mask_head[i]\n                        if self.mask_info_flow:\n                            mask_pred, last_feat = mask_head(\n                                mask_feats, last_feat)\n                        else:\n                            mask_pred = mask_head(mask_feats)\n                        aug_masks.append(mask_pred.sigmoid().cpu().numpy())\n                        aug_img_metas.append(img_meta)\n                merged_masks = merge_aug_masks(aug_masks, aug_img_metas,\n                                               self.test_cfg)\n\n                ori_shape = img_metas[0][0]['ori_shape']\n                segm_result = self.mask_head[-1].get_seg_masks(\n                    merged_masks,\n                    det_bboxes,\n                    det_labels,\n                    rcnn_test_cfg,\n                    ori_shape,\n                    scale_factor=1.0,\n                    rescale=False)\n            return [(bbox_result, segm_result)]\n        else:\n            return 
[bbox_result]\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coarse_mask_head import CoarseMaskHead\nfrom .dynamic_mask_head import DynamicMaskHead\nfrom .fcn_mask_head import FCNMaskHead\nfrom .feature_relay_head import FeatureRelayHead\nfrom .fused_semantic_head import FusedSemanticHead\nfrom .global_context_head import GlobalContextHead\nfrom .grid_head import GridHead\nfrom .htc_mask_head import HTCMaskHead\nfrom .mask_point_head import MaskPointHead\nfrom .maskiou_head import MaskIoUHead\nfrom .scnet_mask_head import SCNetMaskHead\nfrom .scnet_semantic_head import SCNetSemanticHead\n\n__all__ = [\n    'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',\n    'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',\n    'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',\n    'DynamicMaskHead'\n]\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/coarse_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import ConvModule, Linear\nfrom mmcv.runner import ModuleList, auto_fp16\n\nfrom mmdet.models.builder import HEADS\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@HEADS.register_module()\nclass CoarseMaskHead(FCNMaskHead):\n    \"\"\"Coarse mask head used in PointRend.\n\n    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample\n    the input feature map instead of upsample it.\n\n    Args:\n        num_convs (int): Number of conv layers in the head. Default: 0.\n        num_fcs (int): Number of fc layers in the head. Default: 2.\n        fc_out_channels (int): Number of output channels of fc layer.\n            Default: 1024.\n        downsample_factor (int): The factor that feature map is downsampled by.\n            Default: 2.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs=0,\n                 num_fcs=2,\n                 fc_out_channels=1024,\n                 downsample_factor=2,\n                 init_cfg=dict(\n                     type='Xavier',\n                     override=[\n                         dict(name='fcs'),\n                         dict(type='Constant', val=0.001, name='fc_logits')\n                     ]),\n                 *arg,\n                 **kwarg):\n        super(CoarseMaskHead, self).__init__(\n            *arg,\n            num_convs=num_convs,\n            upsample_cfg=dict(type=None),\n            init_cfg=None,\n            **kwarg)\n        self.init_cfg = init_cfg\n        self.num_fcs = num_fcs\n        assert self.num_fcs > 0\n        self.fc_out_channels = fc_out_channels\n        self.downsample_factor = downsample_factor\n        assert self.downsample_factor >= 1\n        # remove conv_logit\n        delattr(self, 'conv_logits')\n\n        if downsample_factor > 1:\n            downsample_in_channels = (\n                self.conv_out_channels\n                if self.num_convs > 0 else self.in_channels)\n            self.downsample_conv = ConvModule(\n                downsample_in_channels,\n                self.conv_out_channels,\n                kernel_size=downsample_factor,\n                stride=downsample_factor,\n                padding=0,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n        else:\n            self.downsample_conv = None\n\n        self.output_size = (self.roi_feat_size[0] // downsample_factor,\n                            self.roi_feat_size[1] // downsample_factor)\n        self.output_area = self.output_size[0] * self.output_size[1]\n\n        last_layer_dim = self.conv_out_channels * self.output_area\n\n        self.fcs = ModuleList()\n        for i in range(num_fcs):\n            fc_in_channels = (\n                last_layer_dim if i == 0 else self.fc_out_channels)\n            self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))\n        last_layer_dim = self.fc_out_channels\n        output_channels = self.num_classes * self.output_area\n        self.fc_logits = Linear(last_layer_dim, output_channels)\n\n    def init_weights(self):\n        super(FCNMaskHead, self).init_weights()\n\n    @auto_fp16()\n    def forward(self, x):\n        for conv in self.convs:\n            x = conv(x)\n\n        if self.downsample_conv is not None:\n            x = self.downsample_conv(x)\n\n        x = x.flatten(1)\n        for fc in self.fcs:\n            x = self.relu(fc(x))\n        
mask_pred = self.fc_logits(x).view(\n            x.size(0), self.num_classes, *self.output_size)\n        return mask_pred\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import auto_fp16, force_fp32\n\nfrom mmdet.core import mask_target\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.dense_heads.atss_head import reduce_mean\nfrom mmdet.models.utils import build_transformer\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@HEADS.register_module()\nclass DynamicMaskHead(FCNMaskHead):\n    r\"\"\"Dynamic Mask Head for\n    `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\n\n    Args:\n        num_convs (int): Number of convolution layer.\n            Defaults to 4.\n        roi_feat_size (int): The output size of RoI extractor,\n            Defaults to 14.\n        in_channels (int): Input feature channels.\n            Defaults to 256.\n        conv_kernel_size (int): Kernel size of convolution layers.\n            Defaults to 3.\n        conv_out_channels (int): Output channels of convolution layers.\n            Defaults to 256.\n        num_classes (int): Number of classes.\n            Defaults to 80\n        class_agnostic (int): Whether generate class agnostic prediction.\n            Defaults to False.\n        dropout (float): Probability of drop the channel.\n            Defaults to 0.0\n        upsample_cfg (dict): The config for upsample layer.\n        conv_cfg (dict): The convolution layer config.\n        norm_cfg (dict): The norm layer config.\n        dynamic_conv_cfg (dict): The dynamic convolution layer config.\n        loss_mask (dict): The config for mask loss.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs=4,\n                 roi_feat_size=14,\n                 in_channels=256,\n                 conv_kernel_size=3,\n                 conv_out_channels=256,\n                 num_classes=80,\n                 class_agnostic=False,\n                 upsample_cfg=dict(type='deconv', scale_factor=2),\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 dynamic_conv_cfg=dict(\n                     type='DynamicConv',\n                     in_channels=256,\n                     feat_channels=64,\n                     out_channels=256,\n                     input_feat_shape=14,\n                     with_proj=False,\n                     act_cfg=dict(type='ReLU', inplace=True),\n                     norm_cfg=dict(type='LN')),\n                 loss_mask=dict(type='DiceLoss', loss_weight=8.0),\n                 **kwargs):\n        super(DynamicMaskHead, self).__init__(\n            num_convs=num_convs,\n            roi_feat_size=roi_feat_size,\n            in_channels=in_channels,\n            conv_kernel_size=conv_kernel_size,\n            conv_out_channels=conv_out_channels,\n            num_classes=num_classes,\n            class_agnostic=class_agnostic,\n            upsample_cfg=upsample_cfg,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            loss_mask=loss_mask,\n            **kwargs)\n        assert class_agnostic is False, \\\n            'DynamicMaskHead only support class_agnostic=False'\n        self.fp16_enabled = False\n\n        self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)\n\n    def init_weights(self):\n        \"\"\"Use xavier initialization for all weight parameter and set\n        classification head bias as a specific value when use focal loss.\"\"\"\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n            
nn.init.constant_(self.conv_logits.bias, 0.)\n\n    @auto_fp16()\n    def forward(self, roi_feat, proposal_feat):\n        \"\"\"Forward function of DynamicMaskHead.\n\n        Args:\n            roi_feat (Tensor): Roi-pooling features with shape\n                (batch_size*num_proposals, feature_dimensions,\n                pooling_h , pooling_w).\n            proposal_feat (Tensor): Intermediate feature get from\n                diihead in last stage, has shape\n                (batch_size*num_proposals, feature_dimensions)\n\n          Returns:\n            mask_pred (Tensor): Predicted foreground masks with shape\n                (batch_size*num_proposals, num_classes,\n                                        pooling_h*2, pooling_w*2).\n        \"\"\"\n\n        proposal_feat = proposal_feat.reshape(-1, self.in_channels)\n        proposal_feat_iic = self.instance_interactive_conv(\n            proposal_feat, roi_feat)\n\n        x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())\n\n        for conv in self.convs:\n            x = conv(x)\n        if self.upsample is not None:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n        mask_pred = self.conv_logits(x)\n        return mask_pred\n\n    @force_fp32(apply_to=('mask_pred', ))\n    def loss(self, mask_pred, mask_targets, labels):\n        num_pos = labels.new_ones(labels.size()).float().sum()\n        avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item()\n        loss = dict()\n        if mask_pred.size(0) == 0:\n            loss_mask = mask_pred.sum()\n        else:\n            loss_mask = self.loss_mask(\n                mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(),\n                mask_targets,\n                avg_factor=avg_factor)\n        loss['loss_mask'] = loss_mask\n        return loss\n\n    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):\n\n        pos_proposals = [res.pos_bboxes for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n                                   gt_masks, rcnn_train_cfg)\n        return mask_targets\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/fcn_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom warnings import warn\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer\nfrom mmcv.ops.carafe import CARAFEPack\nfrom mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.core import mask_target\nfrom mmdet.models.builder import HEADS, build_loss\n\nBYTES_PER_FLOAT = 4\n# TODO: This memory limit may be too much or too little. It would be better to\n# determine it based on available resources.\nGPU_MEM_LIMIT = 1024**3  # 1 GB memory limit\n\n\n@HEADS.register_module()\nclass FCNMaskHead(BaseModule):\n\n    def __init__(self,\n                 num_convs=4,\n                 roi_feat_size=14,\n                 in_channels=256,\n                 conv_kernel_size=3,\n                 conv_out_channels=256,\n                 num_classes=80,\n                 class_agnostic=False,\n                 upsample_cfg=dict(type='deconv', scale_factor=2),\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 predictor_cfg=dict(type='Conv'),\n                 loss_mask=dict(\n                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n                 init_cfg=None):\n        assert init_cfg is None, 'To prevent abnormal initialization ' \\\n                                 'behavior, init_cfg is not allowed to be set'\n        super(FCNMaskHead, self).__init__(init_cfg)\n        self.upsample_cfg = upsample_cfg.copy()\n        if self.upsample_cfg['type'] not in [\n                None, 'deconv', 'nearest', 'bilinear', 'carafe'\n        ]:\n            raise ValueError(\n                f'Invalid upsample method {self.upsample_cfg[\"type\"]}, '\n                'accepted methods are \"deconv\", \"nearest\", \"bilinear\", '\n                '\"carafe\"')\n        self.num_convs = num_convs\n        # WARN: roi_feat_size is reserved and not used\n        self.roi_feat_size = _pair(roi_feat_size)\n        self.in_channels = in_channels\n        self.conv_kernel_size = conv_kernel_size\n        self.conv_out_channels = conv_out_channels\n        self.upsample_method = self.upsample_cfg.get('type')\n        self.scale_factor = self.upsample_cfg.pop('scale_factor', None)\n        self.num_classes = num_classes\n        self.class_agnostic = class_agnostic\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.predictor_cfg = predictor_cfg\n        self.fp16_enabled = False\n        self.loss_mask = build_loss(loss_mask)\n\n        self.convs = ModuleList()\n        for i in range(self.num_convs):\n            in_channels = (\n                self.in_channels if i == 0 else self.conv_out_channels)\n            padding = (self.conv_kernel_size - 1) // 2\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    self.conv_out_channels,\n                    self.conv_kernel_size,\n                    padding=padding,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg))\n        upsample_in_channels = (\n            self.conv_out_channels if self.num_convs > 0 else in_channels)\n        upsample_cfg_ = self.upsample_cfg.copy()\n        if self.upsample_method is None:\n            self.upsample = None\n        elif self.upsample_method == 'deconv':\n            upsample_cfg_.update(\n                
in_channels=upsample_in_channels,\n                out_channels=self.conv_out_channels,\n                kernel_size=self.scale_factor,\n                stride=self.scale_factor)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n        elif self.upsample_method == 'carafe':\n            upsample_cfg_.update(\n                channels=upsample_in_channels, scale_factor=self.scale_factor)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n        else:\n            # suppress warnings\n            align_corners = (None\n                             if self.upsample_method == 'nearest' else False)\n            upsample_cfg_.update(\n                scale_factor=self.scale_factor,\n                mode=self.upsample_method,\n                align_corners=align_corners)\n            self.upsample = build_upsample_layer(upsample_cfg_)\n\n        out_channels = 1 if self.class_agnostic else self.num_classes\n        logits_in_channel = (\n            self.conv_out_channels\n            if self.upsample_method == 'deconv' else upsample_in_channels)\n        self.conv_logits = build_conv_layer(self.predictor_cfg,\n                                            logits_in_channel, out_channels, 1)\n        self.relu = nn.ReLU(inplace=True)\n        self.debug_imgs = None\n\n    def init_weights(self):\n        super(FCNMaskHead, self).init_weights()\n        for m in [self.upsample, self.conv_logits]:\n            if m is None:\n                continue\n            elif isinstance(m, CARAFEPack):\n                m.init_weights()\n            elif hasattr(m, 'weight') and hasattr(m, 'bias'):\n                nn.init.kaiming_normal_(\n                    m.weight, mode='fan_out', nonlinearity='relu')\n                nn.init.constant_(m.bias, 0)\n\n    @auto_fp16()\n    def forward(self, x):\n        for conv in self.convs:\n            x = conv(x)\n        if self.upsample is not None:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n        mask_pred = self.conv_logits(x)\n        return mask_pred\n\n    def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):\n        pos_proposals = [res.pos_bboxes for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,\n                                   gt_masks, rcnn_train_cfg)\n        return mask_targets\n\n    @force_fp32(apply_to=('mask_pred', ))\n    def loss(self, mask_pred, mask_targets, labels):\n        \"\"\"\n        Example:\n            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA\n            >>> N = 7  # N = number of extracted ROIs\n            >>> C, H, W = 11, 32, 32\n            >>> # Create example instance of FCN Mask Head.\n            >>> # There are lots of variations depending on the configuration\n            >>> self = FCNMaskHead(num_classes=C, num_convs=1)\n            >>> inputs = torch.rand(N, self.in_channels, H, W)\n            >>> mask_pred = self.forward(inputs)\n            >>> sf = self.scale_factor\n            >>> labels = torch.randint(0, C, size=(N,))\n            >>> # With the default properties the mask targets should indicate\n            >>> # a (potentially soft) single-class label\n            >>> mask_targets = torch.rand(N, H * sf, W * sf)\n            >>> loss = self.loss(mask_pred, mask_targets, labels)\n            >>> 
print('loss = {!r}'.format(loss))\n        \"\"\"\n        loss = dict()\n        if mask_pred.size(0) == 0:\n            loss_mask = mask_pred.sum()\n        else:\n            if self.class_agnostic:\n                loss_mask = self.loss_mask(mask_pred, mask_targets,\n                                           torch.zeros_like(labels))\n            else:\n                loss_mask = self.loss_mask(mask_pred, mask_targets, labels)\n        loss['loss_mask'] = loss_mask\n        return loss\n\n    def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,\n                      ori_shape, scale_factor, rescale):\n        \"\"\"Get segmentation masks from mask_pred and bboxes.\n\n        Args:\n            mask_pred (Tensor or ndarray): shape (n, #class, h, w).\n                For single-scale testing, mask_pred is the direct output of\n                model, whose type is Tensor, while for multi-scale testing,\n                it will be converted to numpy array outside of this method.\n            det_bboxes (Tensor): shape (n, 4/5)\n            det_labels (Tensor): shape (n, )\n            rcnn_test_cfg (dict): rcnn testing config\n            ori_shape (Tuple): original image height and width, shape (2,)\n            scale_factor(ndarray | Tensor): If ``rescale is True``, box\n                coordinates are divided by this scale factor to fit\n                ``ori_shape``.\n            rescale (bool): If True, the resulting masks will be rescaled to\n                ``ori_shape``.\n\n        Returns:\n            list[list]: encoded masks. The c-th item in the outer list\n                corresponds to the c-th class. Given the c-th outer list, the\n                i-th item in that inner list is the mask for the i-th box with\n                class label c.\n\n        Example:\n            >>> import mmcv\n            >>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import *  # NOQA\n            >>> N = 7  # N = number of extracted ROIs\n            >>> C, H, W = 11, 32, 32\n            >>> # Create example instance of FCN Mask Head.\n            >>> self = FCNMaskHead(num_classes=C, num_convs=0)\n            >>> inputs = torch.rand(N, self.in_channels, H, W)\n            >>> mask_pred = self.forward(inputs)\n            >>> # Each input is associated with some bounding box\n            >>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)\n            >>> det_labels = torch.randint(0, C, size=(N,))\n            >>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })\n            >>> ori_shape = (H * 4, W * 4)\n            >>> scale_factor = torch.FloatTensor((1, 1))\n            >>> rescale = False\n            >>> # Encoded masks are a list for each category.\n            >>> encoded_masks = self.get_seg_masks(\n            >>>     mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,\n            >>>     scale_factor, rescale\n            >>> )\n            >>> assert len(encoded_masks) == C\n            >>> assert sum(list(map(len, encoded_masks))) == N\n        \"\"\"\n        if isinstance(mask_pred, torch.Tensor):\n            mask_pred = mask_pred.sigmoid()\n        else:\n            # In AugTest, has been activated before\n            mask_pred = det_bboxes.new_tensor(mask_pred)\n\n        device = mask_pred.device\n        cls_segms = [[] for _ in range(self.num_classes)\n                     ]  # BG is not included in num_classes\n        bboxes = det_bboxes[:, :4]\n        labels = det_labels\n\n        # In most cases, scale_factor 
should have been\n        # converted to Tensor when rescale the bbox\n        if not isinstance(scale_factor, torch.Tensor):\n            if isinstance(scale_factor, float):\n                scale_factor = np.array([scale_factor] * 4)\n                warn('Scale_factor should be a Tensor or ndarray '\n                     'with shape (4,), float would be deprecated. ')\n            assert isinstance(scale_factor, np.ndarray)\n            scale_factor = torch.Tensor(scale_factor)\n\n        if rescale:\n            img_h, img_w = ori_shape[:2]\n            bboxes = bboxes / scale_factor.to(bboxes)\n        else:\n            w_scale, h_scale = scale_factor[0], scale_factor[1]\n            img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)\n            img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)\n\n        N = len(mask_pred)\n        # The actual implementation split the input into chunks,\n        # and paste them chunk by chunk.\n        if device.type == 'cpu':\n            # CPU is most efficient when they are pasted one by one with\n            # skip_empty=True, so that it performs minimal number of\n            # operations.\n            num_chunks = N\n        else:\n            # GPU benefits from parallelism for larger chunks,\n            # but may have memory issue\n            # the types of img_w and img_h are np.int32,\n            # when the image resolution is large,\n            # the calculation of num_chunks will overflow.\n            # so we need to change the types of img_w and img_h to int.\n            # See https://github.com/open-mmlab/mmdetection/pull/5191\n            num_chunks = int(\n                np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /\n                        GPU_MEM_LIMIT))\n            assert (num_chunks <=\n                    N), 'Default GPU_MEM_LIMIT is too small; try increasing it'\n        chunks = torch.chunk(torch.arange(N, device=device), num_chunks)\n\n        threshold = rcnn_test_cfg.mask_thr_binary\n        im_mask = torch.zeros(\n            N,\n            img_h,\n            img_w,\n            device=device,\n            dtype=torch.bool if threshold >= 0 else torch.uint8)\n\n        if not self.class_agnostic:\n            mask_pred = mask_pred[range(N), labels][:, None]\n\n        for inds in chunks:\n            masks_chunk, spatial_inds = _do_paste_mask(\n                mask_pred[inds],\n                bboxes[inds],\n                img_h,\n                img_w,\n                skip_empty=device.type == 'cpu')\n\n            if threshold >= 0:\n                masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)\n            else:\n                # for visualization and debugging\n                masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)\n\n            im_mask[(inds, ) + spatial_inds] = masks_chunk\n\n        for i in range(N):\n            cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())\n        return cls_segms\n\n    def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,\n                    ori_shape, **kwargs):\n        \"\"\"Get segmentation masks from mask_pred and bboxes.\n\n        Args:\n            mask_pred (Tensor): shape (n, #class, h, w).\n            det_bboxes (Tensor): shape (n, 4/5)\n            det_labels (Tensor): shape (n, )\n            rcnn_test_cfg (dict): rcnn testing config\n            ori_shape (Tuple): original image height and width, shape (2,)\n\n        Returns:\n            Tensor: a mask 
of shape (N, img_h, img_w).\n        \"\"\"\n\n        mask_pred = mask_pred.sigmoid()\n        bboxes = det_bboxes[:, :4]\n        labels = det_labels\n        # No need to consider rescale and scale_factor while exporting to ONNX\n        img_h, img_w = ori_shape[:2]\n        threshold = rcnn_test_cfg.mask_thr_binary\n        if not self.class_agnostic:\n            box_inds = torch.arange(mask_pred.shape[0])\n            mask_pred = mask_pred[box_inds, labels][:, None]\n        masks, _ = _do_paste_mask(\n            mask_pred, bboxes, img_h, img_w, skip_empty=False)\n        if threshold >= 0:\n            # should convert to float to avoid problems in TRT\n            masks = (masks >= threshold).to(dtype=torch.float)\n        return masks\n\n\ndef _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):\n    \"\"\"Paste instance masks according to boxes.\n\n    This implementation is modified from\n    https://github.com/facebookresearch/detectron2/\n\n    Args:\n        masks (Tensor): N, 1, H, W\n        boxes (Tensor): N, 4\n        img_h (int): Height of the image to be pasted.\n        img_w (int): Width of the image to be pasted.\n        skip_empty (bool): Only paste masks within the region that\n            tightly bound all boxes, and returns the results this region only.\n            An important optimization for CPU.\n\n    Returns:\n        tuple: (Tensor, tuple). The first item is mask tensor, the second one\n            is the slice object.\n        If skip_empty == False, the whole image will be pasted. It will\n            return a mask of shape (N, img_h, img_w) and an empty tuple.\n        If skip_empty == True, only area around the mask will be pasted.\n            A mask of shape (N, h', w') and its start and end coordinates\n            in the original image will be returned.\n    \"\"\"\n    # On GPU, paste all masks together (up to chunk size)\n    # by using the entire image to sample the masks\n    # Compared to pasting them one by one,\n    # this has more operations but is faster on COCO-scale dataset.\n    device = masks.device\n    if skip_empty:\n        x0_int, y0_int = torch.clamp(\n            boxes.min(dim=0).values.floor()[:2] - 1,\n            min=0).to(dtype=torch.int32)\n        x1_int = torch.clamp(\n            boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)\n        y1_int = torch.clamp(\n            boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)\n    else:\n        x0_int, y0_int = 0, 0\n        x1_int, y1_int = img_w, img_h\n    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is Nx1\n\n    N = masks.shape[0]\n\n    img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5\n    img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5\n    img_y = (img_y - y0) / (y1 - y0) * 2 - 1\n    img_x = (img_x - x0) / (x1 - x0) * 2 - 1\n    # img_x, img_y have shapes (N, w), (N, h)\n    # IsInf op is not supported with ONNX<=1.7.0\n    if not torch.onnx.is_in_onnx_export():\n        if torch.isinf(img_x).any():\n            inds = torch.where(torch.isinf(img_x))\n            img_x[inds] = 0\n        if torch.isinf(img_y).any():\n            inds = torch.where(torch.isinf(img_y))\n            img_y[inds] = 0\n\n    gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))\n    gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))\n    grid = torch.stack([gx, gy], dim=3)\n\n    img_masks = F.grid_sample(\n        masks.to(dtype=torch.float32), grid, 
align_corners=False)\n\n    if skip_empty:\n        return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))\n    else:\n        return img_masks[:, 0], ()\n"
  },
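  {
    "path": "examples/hypothetical_paste_mask_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches, under simplifying assumptions (no chunking over GPU memory,\n# no skip_empty optimization, no thresholding), the grid_sample based\n# coordinate math that fcn_mask_head._do_paste_mask uses to paste ROI masks\n# back into image coordinates.\nimport torch\nimport torch.nn.functional as F\n\n\ndef paste_masks_sketch(masks, boxes, img_h, img_w):\n    # Paste N ROI masks of shape (N, 1, h, w) into an (N, img_h, img_w) canvas.\n    device = masks.device\n    N = masks.shape[0]\n    x0, y0, x1, y1 = torch.split(boxes, 1, dim=1)  # each is (N, 1)\n\n    # Pixel-center coordinates of the output image.\n    img_y = torch.arange(img_h, device=device, dtype=torch.float32) + 0.5\n    img_x = torch.arange(img_w, device=device, dtype=torch.float32) + 0.5\n    # Normalize image coordinates to the [-1, 1] range of each box so that\n    # grid_sample reads the ROI mask at the matching location.\n    img_y = (img_y - y0) / (y1 - y0) * 2 - 1  # (N, img_h)\n    img_x = (img_x - x0) / (x1 - x0) * 2 - 1  # (N, img_w)\n\n    gx = img_x[:, None, :].expand(N, img_h, img_w)\n    gy = img_y[:, :, None].expand(N, img_h, img_w)\n    grid = torch.stack([gx, gy], dim=3)  # (N, img_h, img_w, 2), (x, y) order\n\n    pasted = F.grid_sample(masks.float(), grid, align_corners=False)\n    return pasted[:, 0]  # (N, img_h, img_w)\n\n\nif __name__ == '__main__':\n    masks = torch.rand(2, 1, 28, 28)\n    boxes = torch.tensor([[10., 10., 50., 60.], [0., 0., 30., 30.]])\n    print(paste_masks_sketch(masks, boxes, img_h=100, img_w=120).shape)\n"
  },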
  {
    "path": "mmdet/models/roi_heads/mask_heads/feature_relay_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom mmdet.models.builder import HEADS\n\n\n@HEADS.register_module()\nclass FeatureRelayHead(BaseModule):\n    \"\"\"Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        in_channels (int, optional): number of input channels. Default: 256.\n        conv_out_channels (int, optional): number of output channels before\n            classification layer. Default: 256.\n        roi_feat_size (int, optional): roi feat size at box head. Default: 7.\n        scale_factor (int, optional): scale factor to match roi feat size\n            at mask head. Default: 2.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels=1024,\n                 out_conv_channels=256,\n                 roi_feat_size=7,\n                 scale_factor=2,\n                 init_cfg=dict(type='Kaiming', layer='Linear')):\n        super(FeatureRelayHead, self).__init__(init_cfg)\n        assert isinstance(roi_feat_size, int)\n\n        self.in_channels = in_channels\n        self.out_conv_channels = out_conv_channels\n        self.roi_feat_size = roi_feat_size\n        self.out_channels = (roi_feat_size**2) * out_conv_channels\n        self.scale_factor = scale_factor\n        self.fp16_enabled = False\n\n        self.fc = nn.Linear(self.in_channels, self.out_channels)\n        self.upsample = nn.Upsample(\n            scale_factor=scale_factor, mode='bilinear', align_corners=True)\n\n    @auto_fp16()\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        N, in_C = x.shape\n        if N > 0:\n            out_C = self.out_conv_channels\n            out_HW = self.roi_feat_size\n            x = self.fc(x)\n            x = x.reshape(N, out_C, out_HW, out_HW)\n            x = self.upsample(x)\n            return x\n        return None\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/fused_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, auto_fp16, force_fp32\n\nfrom mmdet.models.builder import HEADS, build_loss\n\n\n@HEADS.register_module()\nclass FusedSemanticHead(BaseModule):\n    r\"\"\"Multi-level fused semantic segmentation head.\n\n    .. code-block:: none\n\n        in_1 -> 1x1 conv ---\n                            |\n        in_2 -> 1x1 conv -- |\n                           ||\n        in_3 -> 1x1 conv - ||\n                          |||                  /-> 1x1 conv (mask prediction)\n        in_4 -> 1x1 conv -----> 3x3 convs (*4)\n                            |                  \\-> 1x1 conv (feature)\n        in_5 -> 1x1 conv ---\n    \"\"\"  # noqa: W605\n\n    def __init__(self,\n                 num_ins,\n                 fusion_level,\n                 num_convs=4,\n                 in_channels=256,\n                 conv_out_channels=256,\n                 num_classes=183,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 ignore_label=None,\n                 loss_weight=None,\n                 loss_seg=dict(\n                     type='CrossEntropyLoss',\n                     ignore_index=255,\n                     loss_weight=0.2),\n                 init_cfg=dict(\n                     type='Kaiming', override=dict(name='conv_logits'))):\n        super(FusedSemanticHead, self).__init__(init_cfg)\n        self.num_ins = num_ins\n        self.fusion_level = fusion_level\n        self.num_convs = num_convs\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.num_classes = num_classes\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.fp16_enabled = False\n\n        self.lateral_convs = nn.ModuleList()\n        for i in range(self.num_ins):\n            self.lateral_convs.append(\n                ConvModule(\n                    self.in_channels,\n                    self.in_channels,\n                    1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    inplace=False))\n\n        self.convs = nn.ModuleList()\n        for i in range(self.num_convs):\n            in_channels = self.in_channels if i == 0 else conv_out_channels\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    conv_out_channels,\n                    3,\n                    padding=1,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg))\n        self.conv_embedding = ConvModule(\n            conv_out_channels,\n            conv_out_channels,\n            1,\n            conv_cfg=self.conv_cfg,\n            norm_cfg=self.norm_cfg)\n        self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)\n        if ignore_label:\n            loss_seg['ignore_index'] = ignore_label\n        if loss_weight:\n            loss_seg['loss_weight'] = loss_weight\n        if ignore_label or loss_weight:\n            warnings.warn('``ignore_label`` and ``loss_weight`` would be '\n                          'deprecated soon. 
Please set ``ignore_index`` and '\n                          '``loss_weight`` in ``loss_seg`` instead.')\n        self.criterion = build_loss(loss_seg)\n\n    @auto_fp16()\n    def forward(self, feats):\n        x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])\n        fused_size = tuple(x.shape[-2:])\n        for i, feat in enumerate(feats):\n            if i != self.fusion_level:\n                feat = F.interpolate(\n                    feat, size=fused_size, mode='bilinear', align_corners=True)\n                # fix runtime error of \"+=\" inplace operation in PyTorch 1.10\n                x = x + self.lateral_convs[i](feat)\n\n        for i in range(self.num_convs):\n            x = self.convs[i](x)\n\n        mask_pred = self.conv_logits(x)\n        x = self.conv_embedding(x)\n        return mask_pred, x\n\n    @force_fp32(apply_to=('mask_pred', ))\n    def loss(self, mask_pred, labels):\n        labels = labels.squeeze(1).long()\n        loss_semantic_seg = self.criterion(mask_pred, labels)\n        return loss_semantic_seg\n"
  },
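  {
    "path": "examples/hypothetical_semantic_fusion_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches the multi-level fusion pattern of FusedSemanticHead.forward:\n# every level is passed through its own 1x1 lateral conv, resized to the\n# chosen fusion level and summed. Channel counts and feature sizes are made up.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nin_channels, fusion_level = 256, 1\nfeats = [torch.rand(1, in_channels, 64 // 2 ** i, 64 // 2 ** i) for i in range(5)]\nlateral_convs = nn.ModuleList(\n    [nn.Conv2d(in_channels, in_channels, 1) for _ in feats])\n\nx = lateral_convs[fusion_level](feats[fusion_level])\nfused_size = tuple(x.shape[-2:])\nfor i, feat in enumerate(feats):\n    if i != fusion_level:\n        feat = F.interpolate(\n            feat, size=fused_size, mode='bilinear', align_corners=True)\n        x = x + lateral_convs[i](feat)  # out-of-place add, as in the head\nprint(x.shape)  # torch.Size([1, 256, 32, 32])\n"
  },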
  {
    "path": "mmdet/models/roi_heads/mask_heads/global_context_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, auto_fp16, force_fp32\n\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import ResLayer, SimplifiedBasicBlock\n\n\n@HEADS.register_module()\nclass GlobalContextHead(BaseModule):\n    \"\"\"Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        num_convs (int, optional): number of convolutional layer in GlbCtxHead.\n            Default: 4.\n        in_channels (int, optional): number of input channels. Default: 256.\n        conv_out_channels (int, optional): number of output channels before\n            classification layer. Default: 256.\n        num_classes (int, optional): number of classes. Default: 80.\n        loss_weight (float, optional): global context loss weight. Default: 1.\n        conv_cfg (dict, optional): config to init conv layer. Default: None.\n        norm_cfg (dict, optional): config to init norm layer. Default: None.\n        conv_to_res (bool, optional): if True, 2 convs will be grouped into\n            1 `SimplifiedBasicBlock` using a skip connection. Default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs=4,\n                 in_channels=256,\n                 conv_out_channels=256,\n                 num_classes=80,\n                 loss_weight=1.0,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 conv_to_res=False,\n                 init_cfg=dict(\n                     type='Normal', std=0.01, override=dict(name='fc'))):\n        super(GlobalContextHead, self).__init__(init_cfg)\n        self.num_convs = num_convs\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.num_classes = num_classes\n        self.loss_weight = loss_weight\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.conv_to_res = conv_to_res\n        self.fp16_enabled = False\n\n        if self.conv_to_res:\n            num_res_blocks = num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                in_channels,\n                self.conv_out_channels,\n                num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n            self.num_convs = num_res_blocks\n        else:\n            self.convs = nn.ModuleList()\n            for i in range(self.num_convs):\n                in_channels = self.in_channels if i == 0 else conv_out_channels\n                self.convs.append(\n                    ConvModule(\n                        in_channels,\n                        conv_out_channels,\n                        3,\n                        padding=1,\n                        conv_cfg=self.conv_cfg,\n                        norm_cfg=self.norm_cfg))\n\n        self.pool = nn.AdaptiveAvgPool2d(1)\n        self.fc = nn.Linear(conv_out_channels, num_classes)\n\n        self.criterion = nn.BCEWithLogitsLoss()\n\n    @auto_fp16()\n    def forward(self, feats):\n        \"\"\"Forward function.\"\"\"\n        x = feats[-1]\n        for i in range(self.num_convs):\n            x = self.convs[i](x)\n        x = self.pool(x)\n\n        # multi-class prediction\n        mc_pred = x.reshape(x.size(0), -1)\n        mc_pred = self.fc(mc_pred)\n\n        return mc_pred, x\n\n    
@force_fp32(apply_to=('pred', ))\n    def loss(self, pred, labels):\n        \"\"\"Loss function.\"\"\"\n        labels = [lbl.unique() for lbl in labels]\n        targets = pred.new_zeros(pred.size())\n        for i, label in enumerate(labels):\n            targets[i, label] = 1.0\n        loss = self.loss_weight * self.criterion(pred, targets)\n        return loss\n"
  },
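  {
    "path": "examples/hypothetical_global_context_loss_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches how GlobalContextHead.loss builds a multi-hot target per image\n# from its ground-truth labels and applies BCEWithLogitsLoss. The class count\n# and label values are made up.\nimport torch\nimport torch.nn as nn\n\nnum_classes = 80\ncriterion = nn.BCEWithLogitsLoss()\n\n# One entry per image: the gt class index of every instance in that image.\ngt_labels = [torch.tensor([3, 3, 17]), torch.tensor([55])]\npred = torch.randn(len(gt_labels), num_classes)  # global context logits\n\nlabels = [lbl.unique() for lbl in gt_labels]\ntargets = pred.new_zeros(pred.size())\nfor i, label in enumerate(labels):\n    targets[i, label] = 1.0  # mark every class that appears in image i\n\nprint(criterion(pred, targets))\n"
  },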
  {
    "path": "mmdet/models/roi_heads/mask_heads/grid_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS, build_loss\n\n\n@HEADS.register_module()\nclass GridHead(BaseModule):\n\n    def __init__(self,\n                 grid_points=9,\n                 num_convs=8,\n                 roi_feat_size=14,\n                 in_channels=256,\n                 conv_kernel_size=3,\n                 point_feat_channels=64,\n                 deconv_kernel_size=4,\n                 class_agnostic=False,\n                 loss_grid=dict(\n                     type='CrossEntropyLoss', use_sigmoid=True,\n                     loss_weight=15),\n                 conv_cfg=None,\n                 norm_cfg=dict(type='GN', num_groups=36),\n                 init_cfg=[\n                     dict(type='Kaiming', layer=['Conv2d', 'Linear']),\n                     dict(\n                         type='Normal',\n                         layer='ConvTranspose2d',\n                         std=0.001,\n                         override=dict(\n                             type='Normal',\n                             name='deconv2',\n                             std=0.001,\n                             bias=-np.log(0.99 / 0.01)))\n                 ]):\n        super(GridHead, self).__init__(init_cfg)\n        self.grid_points = grid_points\n        self.num_convs = num_convs\n        self.roi_feat_size = roi_feat_size\n        self.in_channels = in_channels\n        self.conv_kernel_size = conv_kernel_size\n        self.point_feat_channels = point_feat_channels\n        self.conv_out_channels = self.point_feat_channels * self.grid_points\n        self.class_agnostic = class_agnostic\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':\n            assert self.conv_out_channels % norm_cfg['num_groups'] == 0\n\n        assert self.grid_points >= 4\n        self.grid_size = int(np.sqrt(self.grid_points))\n        if self.grid_size * self.grid_size != self.grid_points:\n            raise ValueError('grid_points must be a square number')\n\n        # the predicted heatmap is half of whole_map_size\n        if not isinstance(self.roi_feat_size, int):\n            raise ValueError('Only square RoIs are supporeted in Grid R-CNN')\n        self.whole_map_size = self.roi_feat_size * 4\n\n        # compute point-wise sub-regions\n        self.sub_regions = self.calc_sub_regions()\n\n        self.convs = []\n        for i in range(self.num_convs):\n            in_channels = (\n                self.in_channels if i == 0 else self.conv_out_channels)\n            stride = 2 if i == 0 else 1\n            padding = (self.conv_kernel_size - 1) // 2\n            self.convs.append(\n                ConvModule(\n                    in_channels,\n                    self.conv_out_channels,\n                    self.conv_kernel_size,\n                    stride=stride,\n                    padding=padding,\n                    conv_cfg=self.conv_cfg,\n                    norm_cfg=self.norm_cfg,\n                    bias=True))\n        self.convs = nn.Sequential(*self.convs)\n\n        self.deconv1 = nn.ConvTranspose2d(\n            self.conv_out_channels,\n            self.conv_out_channels,\n            kernel_size=deconv_kernel_size,\n            stride=2,\n            padding=(deconv_kernel_size - 2) 
// 2,\n            groups=grid_points)\n        self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)\n        self.deconv2 = nn.ConvTranspose2d(\n            self.conv_out_channels,\n            grid_points,\n            kernel_size=deconv_kernel_size,\n            stride=2,\n            padding=(deconv_kernel_size - 2) // 2,\n            groups=grid_points)\n\n        # find the 4-neighbor of each grid point\n        self.neighbor_points = []\n        grid_size = self.grid_size\n        for i in range(grid_size):  # i-th column\n            for j in range(grid_size):  # j-th row\n                neighbors = []\n                if i > 0:  # left: (i - 1, j)\n                    neighbors.append((i - 1) * grid_size + j)\n                if j > 0:  # up: (i, j - 1)\n                    neighbors.append(i * grid_size + j - 1)\n                if j < grid_size - 1:  # down: (i, j + 1)\n                    neighbors.append(i * grid_size + j + 1)\n                if i < grid_size - 1:  # right: (i + 1, j)\n                    neighbors.append((i + 1) * grid_size + j)\n                self.neighbor_points.append(tuple(neighbors))\n        # total edges in the grid\n        self.num_edges = sum([len(p) for p in self.neighbor_points])\n\n        self.forder_trans = nn.ModuleList()  # first-order feature transition\n        self.sorder_trans = nn.ModuleList()  # second-order feature transition\n        for neighbors in self.neighbor_points:\n            fo_trans = nn.ModuleList()\n            so_trans = nn.ModuleList()\n            for _ in range(len(neighbors)):\n                # each transition module consists of a 5x5 depth-wise conv and\n                # 1x1 conv.\n                fo_trans.append(\n                    nn.Sequential(\n                        nn.Conv2d(\n                            self.point_feat_channels,\n                            self.point_feat_channels,\n                            5,\n                            stride=1,\n                            padding=2,\n                            groups=self.point_feat_channels),\n                        nn.Conv2d(self.point_feat_channels,\n                                  self.point_feat_channels, 1)))\n                so_trans.append(\n                    nn.Sequential(\n                        nn.Conv2d(\n                            self.point_feat_channels,\n                            self.point_feat_channels,\n                            5,\n                            1,\n                            2,\n                            groups=self.point_feat_channels),\n                        nn.Conv2d(self.point_feat_channels,\n                                  self.point_feat_channels, 1)))\n            self.forder_trans.append(fo_trans)\n            self.sorder_trans.append(so_trans)\n\n        self.loss_grid = build_loss(loss_grid)\n\n    def forward(self, x):\n        assert x.shape[-1] == x.shape[-2] == self.roi_feat_size\n        # RoI feature transformation, downsample 2x\n        x = self.convs(x)\n\n        c = self.point_feat_channels\n        # first-order fusion\n        x_fo = [None for _ in range(self.grid_points)]\n        for i, points in enumerate(self.neighbor_points):\n            x_fo[i] = x[:, i * c:(i + 1) * c]\n            for j, point_idx in enumerate(points):\n                x_fo[i] = x_fo[i] + self.forder_trans[i][j](\n                    x[:, point_idx * c:(point_idx + 1) * c])\n\n        # second-order fusion\n        x_so = [None for _ in range(self.grid_points)]\n        for i, 
points in enumerate(self.neighbor_points):\n            x_so[i] = x[:, i * c:(i + 1) * c]\n            for j, point_idx in enumerate(points):\n                x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])\n\n        # predicted heatmap with fused features\n        x2 = torch.cat(x_so, dim=1)\n        x2 = self.deconv1(x2)\n        x2 = F.relu(self.norm1(x2), inplace=True)\n        heatmap = self.deconv2(x2)\n\n        # predicted heatmap with original features (applicable during training)\n        if self.training:\n            x1 = x\n            x1 = self.deconv1(x1)\n            x1 = F.relu(self.norm1(x1), inplace=True)\n            heatmap_unfused = self.deconv2(x1)\n        else:\n            heatmap_unfused = heatmap\n\n        return dict(fused=heatmap, unfused=heatmap_unfused)\n\n    def calc_sub_regions(self):\n        \"\"\"Compute point specific representation regions.\n\n        See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.\n        \"\"\"\n        # to make it consistent with the original implementation, half_size\n        # is computed as 2 * quarter_size, which is smaller\n        half_size = self.whole_map_size // 4 * 2\n        sub_regions = []\n        for i in range(self.grid_points):\n            x_idx = i // self.grid_size\n            y_idx = i % self.grid_size\n            if x_idx == 0:\n                sub_x1 = 0\n            elif x_idx == self.grid_size - 1:\n                sub_x1 = half_size\n            else:\n                ratio = x_idx / (self.grid_size - 1) - 0.25\n                sub_x1 = max(int(ratio * self.whole_map_size), 0)\n\n            if y_idx == 0:\n                sub_y1 = 0\n            elif y_idx == self.grid_size - 1:\n                sub_y1 = half_size\n            else:\n                ratio = y_idx / (self.grid_size - 1) - 0.25\n                sub_y1 = max(int(ratio * self.whole_map_size), 0)\n            sub_regions.append(\n                (sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))\n        return sub_regions\n\n    def get_targets(self, sampling_results, rcnn_train_cfg):\n        # mix all samples (across images) together.\n        pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],\n                               dim=0).cpu()\n        pos_gt_bboxes = torch.cat(\n            [res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()\n        assert pos_bboxes.shape == pos_gt_bboxes.shape\n\n        # expand pos_bboxes to 2x of original size\n        x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2\n        y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2\n        x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2\n        y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2\n        pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)\n        pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)\n        pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)\n\n        num_rois = pos_bboxes.shape[0]\n        map_size = self.whole_map_size\n        # this is not the final target shape\n        targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),\n                              dtype=torch.float)\n\n        # pre-compute interpolation factors for all grid points.\n        # the first item is the factor of x-dim, and the second is y-dim.\n        # for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)\n        factors = []\n        for j in 
range(self.grid_points):\n            x_idx = j // self.grid_size\n            y_idx = j % self.grid_size\n            factors.append((1 - x_idx / (self.grid_size - 1),\n                            1 - y_idx / (self.grid_size - 1)))\n\n        radius = rcnn_train_cfg.pos_radius\n        radius2 = radius**2\n        for i in range(num_rois):\n            # ignore small bboxes\n            if (pos_bbox_ws[i] <= self.grid_size\n                    or pos_bbox_hs[i] <= self.grid_size):\n                continue\n            # for each grid point, mark a small circle as positive\n            for j in range(self.grid_points):\n                factor_x, factor_y = factors[j]\n                gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (\n                    1 - factor_x) * pos_gt_bboxes[i, 2]\n                gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (\n                    1 - factor_y) * pos_gt_bboxes[i, 3]\n\n                cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *\n                         map_size)\n                cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *\n                         map_size)\n\n                for x in range(cx - radius, cx + radius + 1):\n                    for y in range(cy - radius, cy + radius + 1):\n                        if x >= 0 and x < map_size and y >= 0 and y < map_size:\n                            if (x - cx)**2 + (y - cy)**2 <= radius2:\n                                targets[i, j, y, x] = 1\n        # reduce the target heatmap size by a half\n        # proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).\n        sub_targets = []\n        for i in range(self.grid_points):\n            sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]\n            sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])\n        sub_targets = torch.cat(sub_targets, dim=1)\n        sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)\n        return sub_targets\n\n    def loss(self, grid_pred, grid_targets):\n        loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)\n        loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)\n        loss_grid = loss_fused + loss_unfused\n        return dict(loss_grid=loss_grid)\n\n    def get_bboxes(self, det_bboxes, grid_pred, img_metas):\n        # TODO: refactoring\n        assert det_bboxes.shape[0] == grid_pred.shape[0]\n        det_bboxes = det_bboxes.cpu()\n        cls_scores = det_bboxes[:, [4]]\n        det_bboxes = det_bboxes[:, :4]\n        grid_pred = grid_pred.sigmoid().cpu()\n\n        R, c, h, w = grid_pred.shape\n        half_size = self.whole_map_size // 4 * 2\n        assert h == w == half_size\n        assert c == self.grid_points\n\n        # find the point with max scores in the half-sized heatmap\n        grid_pred = grid_pred.view(R * c, h * w)\n        pred_scores, pred_position = grid_pred.max(dim=1)\n        xs = pred_position % w\n        ys = pred_position // w\n\n        # get the position in the whole heatmap instead of half-sized heatmap\n        for i in range(self.grid_points):\n            xs[i::self.grid_points] += self.sub_regions[i][0]\n            ys[i::self.grid_points] += self.sub_regions[i][1]\n\n        # reshape to (num_rois, grid_points)\n        pred_scores, xs, ys = tuple(\n            map(lambda x: x.view(R, c), [pred_scores, xs, ys]))\n\n        # get expanded pos_bboxes\n        widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)\n        heights = (det_bboxes[:, 3] - 
det_bboxes[:, 1]).unsqueeze(-1)\n        x1 = (det_bboxes[:, 0, None] - widths / 2)\n        y1 = (det_bboxes[:, 1, None] - heights / 2)\n        # map the grid point to the absolute coordinates\n        abs_xs = (xs.float() + 0.5) / w * widths + x1\n        abs_ys = (ys.float() + 0.5) / h * heights + y1\n\n        # get the grid points indices that fall on the bbox boundaries\n        x1_inds = [i for i in range(self.grid_size)]\n        y1_inds = [i * self.grid_size for i in range(self.grid_size)]\n        x2_inds = [\n            self.grid_points - self.grid_size + i\n            for i in range(self.grid_size)\n        ]\n        y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]\n\n        # voting of all grid points on some boundary\n        bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, x1_inds].sum(dim=1, keepdim=True))\n        bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, y1_inds].sum(dim=1, keepdim=True))\n        bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, x2_inds].sum(dim=1, keepdim=True))\n        bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(\n            dim=1, keepdim=True) / (\n                pred_scores[:, y2_inds].sum(dim=1, keepdim=True))\n\n        bbox_res = torch.cat(\n            [bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)\n        bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])\n        bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])\n\n        return bbox_res\n"
  },
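  {
    "path": "examples/hypothetical_grid_point_voting_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches the confidence-weighted voting that GridHead.get_bboxes applies\n# to the grid points lying on one box boundary: each boundary coordinate is\n# the heatmap-score-weighted mean of its points. The coordinates and scores\n# below are made-up inputs for a single RoI of a 3x3 grid.\nimport torch\n\nabs_xs = torch.tensor([[11.8, 12.4, 12.0]])    # absolute x of the three x1 points\npred_scores = torch.tensor([[0.9, 0.3, 0.6]])  # heatmap confidence of each point\n\nbboxes_x1 = (abs_xs * pred_scores).sum(dim=1, keepdim=True) / pred_scores.sum(\n    dim=1, keepdim=True)\nprint(bboxes_x1)  # ~11.97: high-confidence points dominate the voted edge\n"
  },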
  {
    "path": "mmdet/models/roi_heads/mask_heads/htc_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import ConvModule\n\nfrom mmdet.models.builder import HEADS\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@HEADS.register_module()\nclass HTCMaskHead(FCNMaskHead):\n\n    def __init__(self, with_conv_res=True, *args, **kwargs):\n        super(HTCMaskHead, self).__init__(*args, **kwargs)\n        self.with_conv_res = with_conv_res\n        if self.with_conv_res:\n            self.conv_res = ConvModule(\n                self.conv_out_channels,\n                self.conv_out_channels,\n                1,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n\n    def forward(self, x, res_feat=None, return_logits=True, return_feat=True):\n        if res_feat is not None:\n            assert self.with_conv_res\n            res_feat = self.conv_res(res_feat)\n            x = x + res_feat\n        for conv in self.convs:\n            x = conv(x)\n        res_feat = x\n        outs = []\n        if return_logits:\n            x = self.upsample(x)\n            if self.upsample_method == 'deconv':\n                x = self.relu(x)\n            mask_pred = self.conv_logits(x)\n            outs.append(mask_pred)\n        if return_feat:\n            outs.append(res_feat)\n        return outs if len(outs) > 1 else outs[0]\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/mask_point_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py  # noqa\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops import point_sample, rel_roi_point_to_rel_img_point\nfrom mmcv.runner import BaseModule\n\nfrom mmdet.models.builder import HEADS, build_loss\nfrom mmdet.models.utils import (get_uncertain_point_coords_with_randomness,\n                                get_uncertainty)\n\n\n@HEADS.register_module()\nclass MaskPointHead(BaseModule):\n    \"\"\"A mask point head use in PointRend.\n\n    ``MaskPointHead`` use shared multi-layer perceptron (equivalent to\n    nn.Conv1d) to predict the logit of input points. The fine-grained feature\n    and coarse feature will be concatenate together for predication.\n\n    Args:\n        num_fcs (int): Number of fc layers in the head. Default: 3.\n        in_channels (int): Number of input channels. Default: 256.\n        fc_channels (int): Number of fc channels. Default: 256.\n        num_classes (int): Number of classes for logits. Default: 80.\n        class_agnostic (bool): Whether use class agnostic classification.\n            If so, the output channels of logits will be 1. Default: False.\n        coarse_pred_each_layer (bool): Whether concatenate coarse feature with\n            the output of each fc layer. Default: True.\n        conv_cfg (dict | None): Dictionary to construct and config conv layer.\n            Default: dict(type='Conv1d'))\n        norm_cfg (dict | None): Dictionary to construct and config norm layer.\n            Default: None.\n        loss_point (dict): Dictionary to construct and config loss layer of\n            point head. 
Default: dict(type='CrossEntropyLoss', use_mask=True,\n            loss_weight=1.0).\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 num_fcs=3,\n                 in_channels=256,\n                 fc_channels=256,\n                 class_agnostic=False,\n                 coarse_pred_each_layer=True,\n                 conv_cfg=dict(type='Conv1d'),\n                 norm_cfg=None,\n                 act_cfg=dict(type='ReLU'),\n                 loss_point=dict(\n                     type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),\n                 init_cfg=dict(\n                     type='Normal', std=0.001,\n                     override=dict(name='fc_logits'))):\n        super().__init__(init_cfg)\n        self.num_fcs = num_fcs\n        self.in_channels = in_channels\n        self.fc_channels = fc_channels\n        self.num_classes = num_classes\n        self.class_agnostic = class_agnostic\n        self.coarse_pred_each_layer = coarse_pred_each_layer\n        self.conv_cfg = conv_cfg\n        self.norm_cfg = norm_cfg\n        self.loss_point = build_loss(loss_point)\n\n        fc_in_channels = in_channels + num_classes\n        self.fcs = nn.ModuleList()\n        for _ in range(num_fcs):\n            fc = ConvModule(\n                fc_in_channels,\n                fc_channels,\n                kernel_size=1,\n                stride=1,\n                padding=0,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n            self.fcs.append(fc)\n            fc_in_channels = fc_channels\n            fc_in_channels += num_classes if self.coarse_pred_each_layer else 0\n\n        out_channels = 1 if self.class_agnostic else self.num_classes\n        self.fc_logits = nn.Conv1d(\n            fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)\n\n    def forward(self, fine_grained_feats, coarse_feats):\n        \"\"\"Classify each point base on fine grained and coarse feats.\n\n        Args:\n            fine_grained_feats (Tensor): Fine grained feature sampled from FPN,\n                shape (num_rois, in_channels, num_points).\n            coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,\n                shape (num_rois, num_classes, num_points).\n\n        Returns:\n            Tensor: Point classification results,\n                shape (num_rois, num_class, num_points).\n        \"\"\"\n\n        x = torch.cat([fine_grained_feats, coarse_feats], dim=1)\n        for fc in self.fcs:\n            x = fc(x)\n            if self.coarse_pred_each_layer:\n                x = torch.cat((x, coarse_feats), dim=1)\n        return self.fc_logits(x)\n\n    def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,\n                    cfg):\n        \"\"\"Get training targets of MaskPointHead for all images.\n\n        Args:\n            rois (Tensor): Region of Interest, shape (num_rois, 5).\n            rel_roi_points: Points coordinates relative to RoI, shape\n                (num_rois, num_points, 2).\n            sampling_results (:obj:`SamplingResult`): Sampling result after\n                sampling and assignment.\n            gt_masks (Tensor) : Ground truth segmentation masks of\n                corresponding boxes, shape (num_rois, height, width).\n            cfg (dict): Training cfg.\n\n        Returns:\n            Tensor: Point target, shape (num_rois, 
num_points).\n        \"\"\"\n\n        num_imgs = len(sampling_results)\n        rois_list = []\n        rel_roi_points_list = []\n        for batch_ind in range(num_imgs):\n            inds = (rois[:, 0] == batch_ind)\n            rois_list.append(rois[inds])\n            rel_roi_points_list.append(rel_roi_points[inds])\n        pos_assigned_gt_inds_list = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n        cfg_list = [cfg for _ in range(num_imgs)]\n\n        point_targets = map(self._get_target_single, rois_list,\n                            rel_roi_points_list, pos_assigned_gt_inds_list,\n                            gt_masks, cfg_list)\n        point_targets = list(point_targets)\n\n        if len(point_targets) > 0:\n            point_targets = torch.cat(point_targets)\n\n        return point_targets\n\n    def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,\n                           gt_masks, cfg):\n        \"\"\"Get training target of MaskPointHead for each image.\"\"\"\n        num_pos = rois.size(0)\n        num_points = cfg.num_points\n        if num_pos > 0:\n            gt_masks_th = (\n                gt_masks.to_tensor(rois.dtype, rois.device).index_select(\n                    0, pos_assigned_gt_inds))\n            gt_masks_th = gt_masks_th.unsqueeze(1)\n            rel_img_points = rel_roi_point_to_rel_img_point(\n                rois, rel_roi_points, gt_masks_th)\n            point_targets = point_sample(gt_masks_th,\n                                         rel_img_points).squeeze(1)\n        else:\n            point_targets = rois.new_zeros((0, num_points))\n        return point_targets\n\n    def loss(self, point_pred, point_targets, labels):\n        \"\"\"Calculate loss for MaskPointHead.\n\n        Args:\n            point_pred (Tensor): Point predication result, shape\n                (num_rois, num_classes, num_points).\n            point_targets (Tensor): Point targets, shape (num_roi, num_points).\n            labels (Tensor): Class label of corresponding boxes,\n                shape (num_rois, )\n\n        Returns:\n            dict[str, Tensor]: a dictionary of point loss components\n        \"\"\"\n\n        loss = dict()\n        if self.class_agnostic:\n            loss_point = self.loss_point(point_pred, point_targets,\n                                         torch.zeros_like(labels))\n        else:\n            loss_point = self.loss_point(point_pred, point_targets, labels)\n        loss['loss_point'] = loss_point\n        return loss\n\n    def get_roi_rel_points_train(self, mask_pred, labels, cfg):\n        \"\"\"Get ``num_points`` most uncertain points with random points during\n        train.\n\n        Sample points in [0, 1] x [0, 1] coordinate space based on their\n        uncertainty. 
The uncertainties are calculated for each point using\n        '_get_uncertainty()' function that takes point's logit prediction as\n        input.\n\n        Args:\n            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n                mask_height, mask_width) for class-specific or class-agnostic\n                prediction.\n            labels (list): The ground truth class for each instance.\n            cfg (dict): Training config of point head.\n\n        Returns:\n            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n                that contains the coordinates sampled points.\n        \"\"\"\n        point_coords = get_uncertain_point_coords_with_randomness(\n            mask_pred, labels, cfg.num_points, cfg.oversample_ratio,\n            cfg.importance_sample_ratio)\n        return point_coords\n\n    def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):\n        \"\"\"Get ``num_points`` most uncertain points during test.\n\n        Args:\n            mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n                mask_height, mask_width) for class-specific or class-agnostic\n                prediction.\n            pred_label (list): The predication class for each instance.\n            cfg (dict): Testing config of point head.\n\n        Returns:\n            point_indices (Tensor): A tensor of shape (num_rois, num_points)\n                that contains indices from [0, mask_height x mask_width) of the\n                most uncertain points.\n            point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n                that contains [0, 1] x [0, 1] normalized coordinates of the\n                most uncertain points from the [mask_height, mask_width] grid .\n        \"\"\"\n        num_points = cfg.subdivision_num_points\n        uncertainty_map = get_uncertainty(mask_pred, pred_label)\n        num_rois, _, mask_height, mask_width = uncertainty_map.shape\n\n        # During ONNX exporting, the type of each elements of 'shape' is\n        # `Tensor(float)`, while it is `float` during PyTorch inference.\n        if isinstance(mask_height, torch.Tensor):\n            h_step = 1.0 / mask_height.float()\n            w_step = 1.0 / mask_width.float()\n        else:\n            h_step = 1.0 / mask_height\n            w_step = 1.0 / mask_width\n        # cast to int to avoid dynamic K for TopK op in ONNX\n        mask_size = int(mask_height * mask_width)\n        uncertainty_map = uncertainty_map.view(num_rois, mask_size)\n        num_points = min(mask_size, num_points)\n        point_indices = uncertainty_map.topk(num_points, dim=1)[1]\n        xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step\n        ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step\n        point_coords = torch.stack([xs, ys], dim=2)\n        return point_indices, point_coords\n"
  },
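  {
    "path": "examples/hypothetical_point_selection_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches the index-to-coordinate conversion used in\n# MaskPointHead.get_roi_rel_points_test: take the top-K most uncertain cells\n# of an (H, W) uncertainty map and turn their flat indices into normalized\n# [0, 1] x [0, 1] point coordinates. The uncertainty map here is random.\nimport torch\n\nnum_rois, H, W, K = 2, 7, 7, 5\nuncertainty_map = torch.rand(num_rois, 1, H, W)\n\nh_step, w_step = 1.0 / H, 1.0 / W\nflat = uncertainty_map.view(num_rois, H * W)\npoint_indices = flat.topk(K, dim=1)[1]                    # (num_rois, K)\nxs = w_step / 2.0 + (point_indices % W).float() * w_step  # cell centers in x\nys = h_step / 2.0 + (point_indices // W).float() * h_step  # cell centers in y\npoint_coords = torch.stack([xs, ys], dim=2)               # (num_rois, K, 2)\nprint(point_indices.shape, point_coords.shape)\n"
  },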
  {
    "path": "mmdet/models/roi_heads/mask_heads/maskiou_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Conv2d, Linear, MaxPool2d\nfrom mmcv.runner import BaseModule, force_fp32\nfrom torch.nn.modules.utils import _pair\n\nfrom mmdet.models.builder import HEADS, build_loss\n\n\n@HEADS.register_module()\nclass MaskIoUHead(BaseModule):\n    \"\"\"Mask IoU Head.\n\n    This head predicts the IoU of predicted masks and corresponding gt masks.\n    \"\"\"\n\n    def __init__(self,\n                 num_convs=4,\n                 num_fcs=2,\n                 roi_feat_size=14,\n                 in_channels=256,\n                 conv_out_channels=256,\n                 fc_out_channels=1024,\n                 num_classes=80,\n                 loss_iou=dict(type='MSELoss', loss_weight=0.5),\n                 init_cfg=[\n                     dict(type='Kaiming', override=dict(name='convs')),\n                     dict(type='Caffe2Xavier', override=dict(name='fcs')),\n                     dict(\n                         type='Normal',\n                         std=0.01,\n                         override=dict(name='fc_mask_iou'))\n                 ]):\n        super(MaskIoUHead, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.conv_out_channels = conv_out_channels\n        self.fc_out_channels = fc_out_channels\n        self.num_classes = num_classes\n        self.fp16_enabled = False\n\n        self.convs = nn.ModuleList()\n        for i in range(num_convs):\n            if i == 0:\n                # concatenation of mask feature and mask prediction\n                in_channels = self.in_channels + 1\n            else:\n                in_channels = self.conv_out_channels\n            stride = 2 if i == num_convs - 1 else 1\n            self.convs.append(\n                Conv2d(\n                    in_channels,\n                    self.conv_out_channels,\n                    3,\n                    stride=stride,\n                    padding=1))\n\n        roi_feat_size = _pair(roi_feat_size)\n        pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)\n        self.fcs = nn.ModuleList()\n        for i in range(num_fcs):\n            in_channels = (\n                self.conv_out_channels *\n                pooled_area if i == 0 else self.fc_out_channels)\n            self.fcs.append(Linear(in_channels, self.fc_out_channels))\n\n        self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)\n        self.relu = nn.ReLU()\n        self.max_pool = MaxPool2d(2, 2)\n        self.loss_iou = build_loss(loss_iou)\n\n    def forward(self, mask_feat, mask_pred):\n        mask_pred = mask_pred.sigmoid()\n        mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))\n\n        x = torch.cat((mask_feat, mask_pred_pooled), 1)\n\n        for conv in self.convs:\n            x = self.relu(conv(x))\n        x = x.flatten(1)\n        for fc in self.fcs:\n            x = self.relu(fc(x))\n        mask_iou = self.fc_mask_iou(x)\n        return mask_iou\n\n    @force_fp32(apply_to=('mask_iou_pred', ))\n    def loss(self, mask_iou_pred, mask_iou_targets):\n        pos_inds = mask_iou_targets > 0\n        if pos_inds.sum() > 0:\n            loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],\n                                          mask_iou_targets[pos_inds])\n        else:\n            loss_mask_iou = mask_iou_pred.sum() * 0\n        return dict(loss_mask_iou=loss_mask_iou)\n\n    @force_fp32(apply_to=('mask_pred', 
))\n    def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,\n                    rcnn_train_cfg):\n        \"\"\"Compute target of mask IoU.\n\n        Mask IoU target is the IoU of the predicted mask (inside a bbox) and\n        the gt mask of corresponding gt mask (the whole instance).\n        The intersection area is computed inside the bbox, and the gt mask area\n        is computed with two steps, firstly we compute the gt area inside the\n        bbox, then divide it by the area ratio of gt area inside the bbox and\n        the gt area of the whole instance.\n\n        Args:\n            sampling_results (list[:obj:`SamplingResult`]): sampling results.\n            gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)\n                of each image, with the same shape of the input image.\n            mask_pred (Tensor): Predicted masks of each positive proposal,\n                shape (num_pos, h, w).\n            mask_targets (Tensor): Gt mask of each positive proposal,\n                binary map of the shape (num_pos, h, w).\n            rcnn_train_cfg (dict): Training config for R-CNN part.\n\n        Returns:\n            Tensor: mask iou target (length == num positive).\n        \"\"\"\n        pos_proposals = [res.pos_bboxes for res in sampling_results]\n        pos_assigned_gt_inds = [\n            res.pos_assigned_gt_inds for res in sampling_results\n        ]\n\n        # compute the area ratio of gt areas inside the proposals and\n        # the whole instance\n        area_ratios = map(self._get_area_ratio, pos_proposals,\n                          pos_assigned_gt_inds, gt_masks)\n        area_ratios = torch.cat(list(area_ratios))\n        assert mask_targets.size(0) == area_ratios.size(0)\n\n        mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()\n        mask_pred_areas = mask_pred.sum((-1, -2))\n\n        # mask_pred and mask_targets are binary maps\n        overlap_areas = (mask_pred * mask_targets).sum((-1, -2))\n\n        # compute the mask area of the whole instance\n        gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)\n\n        mask_iou_targets = overlap_areas / (\n            mask_pred_areas + gt_full_areas - overlap_areas)\n        return mask_iou_targets\n\n    def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):\n        \"\"\"Compute area ratio of the gt mask inside the proposal and the gt\n        mask of the corresponding instance.\"\"\"\n        num_pos = pos_proposals.size(0)\n        if num_pos > 0:\n            area_ratios = []\n            proposals_np = pos_proposals.cpu().numpy()\n            pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()\n            # compute mask areas of gt instances (batch processing for speedup)\n            gt_instance_mask_area = gt_masks.areas\n            for i in range(num_pos):\n                gt_mask = gt_masks[pos_assigned_gt_inds[i]]\n\n                # crop the gt mask inside the proposal\n                bbox = proposals_np[i, :].astype(np.int32)\n                gt_mask_in_proposal = gt_mask.crop(bbox)\n\n                ratio = gt_mask_in_proposal.areas[0] / (\n                    gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)\n                area_ratios.append(ratio)\n            area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(\n                pos_proposals.device)\n        else:\n            area_ratios = pos_proposals.new_zeros((0, ))\n        return area_ratios\n\n    
@force_fp32(apply_to=('mask_iou_pred', ))\n    def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):\n        \"\"\"Get the mask scores.\n\n        mask_score = bbox_score * mask_iou\n        \"\"\"\n        inds = range(det_labels.size(0))\n        mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]\n        mask_scores = mask_scores.cpu().numpy()\n        det_labels = det_labels.cpu().numpy()\n        return [mask_scores[det_labels == i] for i in range(self.num_classes)]\n"
  },
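  {
    "path": "examples/hypothetical_mask_iou_target_sketch.py",
    "content": "# Hypothetical illustration file, not part of the upstream OpenMMLab code.\n# It sketches the arithmetic of MaskIoUHead.get_targets: the intersection is\n# computed inside the box, while the full gt area is recovered by dividing\n# the in-box gt area by the precomputed area ratio (in-box gt area /\n# whole-instance gt area). All tensors below are synthetic.\nimport torch\n\nmask_pred = (torch.rand(4, 28, 28) > 0.5).float()     # binarized predicted masks\nmask_targets = (torch.rand(4, 28, 28) > 0.5).float()  # gt masks cropped to boxes\narea_ratios = torch.tensor([1.0, 0.8, 0.9, 0.7])      # in-box gt / full gt area\n\nmask_pred_areas = mask_pred.sum((-1, -2))\noverlap_areas = (mask_pred * mask_targets).sum((-1, -2))\ngt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)\n\nmask_iou_targets = overlap_areas / (\n    mask_pred_areas + gt_full_areas - overlap_areas)\nprint(mask_iou_targets)\n\n# At test time MaskIoUHead.get_mask_scores multiplies the box score by the\n# predicted mask IoU: mask_score = bbox_score * mask_iou.\n"
  },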
  {
    "path": "mmdet/models/roi_heads/mask_heads/scnet_mask_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import ResLayer, SimplifiedBasicBlock\nfrom .fcn_mask_head import FCNMaskHead\n\n\n@HEADS.register_module()\nclass SCNetMaskHead(FCNMaskHead):\n    \"\"\"Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        conv_to_res (bool, optional): if True, change the conv layers to\n            ``SimplifiedBasicBlock``.\n    \"\"\"\n\n    def __init__(self, conv_to_res=True, **kwargs):\n        super(SCNetMaskHead, self).__init__(**kwargs)\n        self.conv_to_res = conv_to_res\n        if conv_to_res:\n            assert self.conv_kernel_size == 3\n            self.num_res_blocks = self.num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                self.in_channels,\n                self.conv_out_channels,\n                self.num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.models.builder import HEADS\nfrom mmdet.models.utils import ResLayer, SimplifiedBasicBlock\nfrom .fused_semantic_head import FusedSemanticHead\n\n\n@HEADS.register_module()\nclass SCNetSemanticHead(FusedSemanticHead):\n    \"\"\"Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        conv_to_res (bool, optional): if True, change the conv layers to\n            ``SimplifiedBasicBlock``.\n    \"\"\"\n\n    def __init__(self, conv_to_res=True, **kwargs):\n        super(SCNetSemanticHead, self).__init__(**kwargs)\n        self.conv_to_res = conv_to_res\n        if self.conv_to_res:\n            num_res_blocks = self.num_convs // 2\n            self.convs = ResLayer(\n                SimplifiedBasicBlock,\n                self.in_channels,\n                self.conv_out_channels,\n                num_res_blocks,\n                conv_cfg=self.conv_cfg,\n                norm_cfg=self.norm_cfg)\n            self.num_convs = num_res_blocks\n"
  },
  {
    "path": "mmdet/models/roi_heads/mask_scoring_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import bbox2roi\nfrom ..builder import HEADS, build_head\nfrom .standard_roi_head import StandardRoIHead\n\n\n@HEADS.register_module()\nclass MaskScoringRoIHead(StandardRoIHead):\n    \"\"\"Mask Scoring RoIHead for Mask Scoring RCNN.\n\n    https://arxiv.org/abs/1903.00241\n    \"\"\"\n\n    def __init__(self, mask_iou_head, **kwargs):\n        assert mask_iou_head is not None\n        super(MaskScoringRoIHead, self).__init__(**kwargs)\n        self.mask_iou_head = build_head(mask_iou_head)\n\n    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n                            img_metas):\n        \"\"\"Run forward function and calculate loss for Mask head in\n        training.\"\"\"\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        mask_results = super(MaskScoringRoIHead,\n                             self)._mask_forward_train(x, sampling_results,\n                                                       bbox_feats, gt_masks,\n                                                       img_metas)\n        if mask_results['loss_mask'] is None:\n            return mask_results\n\n        # mask iou head forward and loss\n        pos_mask_pred = mask_results['mask_pred'][\n            range(mask_results['mask_pred'].size(0)), pos_labels]\n        mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],\n                                           pos_mask_pred)\n        pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),\n                                          pos_labels]\n\n        mask_iou_targets = self.mask_iou_head.get_targets(\n            sampling_results, gt_masks, pos_mask_pred,\n            mask_results['mask_targets'], self.train_cfg)\n        loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,\n                                                mask_iou_targets)\n        mask_results['loss_mask'].update(loss_mask_iou)\n        return mask_results\n\n    def simple_test_mask(self,\n                         x,\n                         img_metas,\n                         det_bboxes,\n                         det_labels,\n                         rescale=False):\n        \"\"\"Obtain mask prediction without augmentation.\"\"\"\n        # image shapes of images in the batch\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        num_imgs = len(det_bboxes)\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            num_classes = self.mask_head.num_classes\n            segm_results = [[[] for _ in range(num_classes)]\n                            for _ in range(num_imgs)]\n            mask_scores = [[[] for _ in range(num_classes)]\n                           for _ in range(num_imgs)]\n        else:\n            # if det_bboxes is rescaled to the original image size, we need to\n            # rescale it back to the testing scale to obtain RoIs.\n            if rescale and not isinstance(scale_factors[0], float):\n                scale_factors = [\n                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                    for scale_factor in scale_factors\n                ]\n            _bboxes = [\n                det_bboxes[i][:, :4] *\n                scale_factors[i] if rescale else det_bboxes[i]\n                for i in range(num_imgs)\n            ]\n            
mask_rois = bbox2roi(_bboxes)\n            mask_results = self._mask_forward(x, mask_rois)\n            concat_det_labels = torch.cat(det_labels)\n            # get mask scores with mask iou head\n            mask_feats = mask_results['mask_feats']\n            mask_pred = mask_results['mask_pred']\n            mask_iou_pred = self.mask_iou_head(\n                mask_feats, mask_pred[range(concat_det_labels.size(0)),\n                                      concat_det_labels])\n            # split batch mask prediction back to each image\n            num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)\n            mask_preds = mask_pred.split(num_bboxes_per_img, 0)\n            mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)\n\n            # apply mask post-processing to each image individually\n            segm_results = []\n            mask_scores = []\n            for i in range(num_imgs):\n                if det_bboxes[i].shape[0] == 0:\n                    segm_results.append(\n                        [[] for _ in range(self.mask_head.num_classes)])\n                    mask_scores.append(\n                        [[] for _ in range(self.mask_head.num_classes)])\n                else:\n                    segm_result = self.mask_head.get_seg_masks(\n                        mask_preds[i], _bboxes[i], det_labels[i],\n                        self.test_cfg, ori_shapes[i], scale_factors[i],\n                        rescale)\n                    # get mask scores with mask iou head\n                    mask_score = self.mask_iou_head.get_mask_scores(\n                        mask_iou_preds[i], det_bboxes[i], det_labels[i])\n                    segm_results.append(segm_result)\n                    mask_scores.append(mask_score)\n        return list(zip(segm_results, mask_scores))\n"
  },
  {
    "path": "mmdet/models/roi_heads/pisa_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.core import bbox2roi\nfrom ..builder import HEADS\nfrom ..losses.pisa_loss import carl_loss, isr_p\nfrom .standard_roi_head import StandardRoIHead\n\n\n@HEADS.register_module()\nclass PISARoIHead(StandardRoIHead):\n    r\"\"\"The RoI head for `Prime Sample Attention in Object Detection\n    <https://arxiv.org/abs/1904.04821>`_.\"\"\"\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None):\n        \"\"\"Forward function for training.\n\n        Args:\n            x (list[Tensor]): List of multi-level img features.\n            img_metas (list[dict]): List of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            proposals (list[Tensors]): List of region proposals.\n            gt_bboxes (list[Tensor]): Each item are the truth boxes for each\n                image in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): Class indices corresponding to each box\n            gt_bboxes_ignore (list[Tensor], optional): Specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (None | Tensor) : True segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        # assign gts and sample proposals\n        if self.with_bbox or self.with_mask:\n            num_imgs = len(img_metas)\n            if gt_bboxes_ignore is None:\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n            sampling_results = []\n            neg_label_weights = []\n            for i in range(num_imgs):\n                assign_result = self.bbox_assigner.assign(\n                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n                    gt_labels[i])\n                sampling_result = self.bbox_sampler.sample(\n                    assign_result,\n                    proposal_list[i],\n                    gt_bboxes[i],\n                    gt_labels[i],\n                    feats=[lvl_feat[i][None] for lvl_feat in x])\n                # neg label weight is obtained by sampling when using ISR-N\n                neg_label_weight = None\n                if isinstance(sampling_result, tuple):\n                    sampling_result, neg_label_weight = sampling_result\n                sampling_results.append(sampling_result)\n                neg_label_weights.append(neg_label_weight)\n\n        losses = dict()\n        # bbox head forward and loss\n        if self.with_bbox:\n            bbox_results = self._bbox_forward_train(\n                x,\n                sampling_results,\n                gt_bboxes,\n                gt_labels,\n                img_metas,\n                neg_label_weights=neg_label_weights)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self._mask_forward_train(x, sampling_results,\n         
                                           bbox_results['bbox_feats'],\n                                                    gt_masks, img_metas)\n            losses.update(mask_results['loss_mask'])\n\n        return losses\n\n    def _bbox_forward(self, x, rois):\n        \"\"\"Box forward function used in both training and testing.\"\"\"\n        # TODO: a more flexible way to decide which feature maps to use\n        bbox_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        if self.with_shared_head:\n            bbox_feats = self.shared_head(bbox_feats)\n        cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def _bbox_forward_train(self,\n                            x,\n                            sampling_results,\n                            gt_bboxes,\n                            gt_labels,\n                            img_metas,\n                            neg_label_weights=None):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n\n        bbox_results = self._bbox_forward(x, rois)\n\n        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n                                                  gt_labels, self.train_cfg)\n\n        # neg_label_weights obtained by sampler is image-wise, mapping back to\n        # the corresponding location in label weights\n        if neg_label_weights[0] is not None:\n            label_weights = bbox_targets[1]\n            cur_num_rois = 0\n            for i in range(len(sampling_results)):\n                num_pos = sampling_results[i].pos_inds.size(0)\n                num_neg = sampling_results[i].neg_inds.size(0)\n                label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +\n                              num_neg] = neg_label_weights[i]\n                cur_num_rois += num_pos + num_neg\n\n        cls_score = bbox_results['cls_score']\n        bbox_pred = bbox_results['bbox_pred']\n\n        # Apply ISR-P\n        isr_cfg = self.train_cfg.get('isr', None)\n        if isr_cfg is not None:\n            bbox_targets = isr_p(\n                cls_score,\n                bbox_pred,\n                bbox_targets,\n                rois,\n                sampling_results,\n                self.bbox_head.loss_cls,\n                self.bbox_head.bbox_coder,\n                **isr_cfg,\n                num_class=self.bbox_head.num_classes)\n        loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,\n                                        *bbox_targets)\n\n        # Add CARL Loss\n        carl_cfg = self.train_cfg.get('carl', None)\n        if carl_cfg is not None:\n            loss_carl = carl_loss(\n                cls_score,\n                bbox_targets[0],\n                bbox_pred,\n                bbox_targets[2],\n                self.bbox_head.loss_bbox,\n                **carl_cfg,\n                num_class=self.bbox_head.num_classes)\n            loss_bbox.update(loss_carl)\n\n        bbox_results.update(loss_bbox=loss_bbox)\n        return bbox_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/point_rend_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend  # noqa\nimport os\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.ops import point_sample, rel_roi_point_to_rel_img_point\n\nfrom mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks\nfrom .. import builder\nfrom ..builder import HEADS\nfrom .standard_roi_head import StandardRoIHead\n\n\n@HEADS.register_module()\nclass PointRendRoIHead(StandardRoIHead):\n    \"\"\"`PointRend <https://arxiv.org/abs/1912.08193>`_.\"\"\"\n\n    def __init__(self, point_head, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n        assert self.with_bbox and self.with_mask\n        self.init_point_head(point_head)\n\n    def init_point_head(self, point_head):\n        \"\"\"Initialize ``point_head``\"\"\"\n        self.point_head = builder.build_head(point_head)\n\n    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n                            img_metas):\n        \"\"\"Run forward function and calculate loss for mask head and point head\n        in training.\"\"\"\n        mask_results = super()._mask_forward_train(x, sampling_results,\n                                                   bbox_feats, gt_masks,\n                                                   img_metas)\n        if mask_results['loss_mask'] is not None:\n            loss_point = self._mask_point_forward_train(\n                x, sampling_results, mask_results['mask_pred'], gt_masks,\n                img_metas)\n            mask_results['loss_mask'].update(loss_point)\n\n        return mask_results\n\n    def _mask_point_forward_train(self, x, sampling_results, mask_pred,\n                                  gt_masks, img_metas):\n        \"\"\"Run forward function and calculate loss for point head in\n        training.\"\"\"\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        rel_roi_points = self.point_head.get_roi_rel_points_train(\n            mask_pred, pos_labels, cfg=self.train_cfg)\n        rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n\n        fine_grained_point_feats = self._get_fine_grained_point_feats(\n            x, rois, rel_roi_points, img_metas)\n        coarse_point_feats = point_sample(mask_pred, rel_roi_points)\n        mask_point_pred = self.point_head(fine_grained_point_feats,\n                                          coarse_point_feats)\n        mask_point_target = self.point_head.get_targets(\n            rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)\n        loss_mask_point = self.point_head.loss(mask_point_pred,\n                                               mask_point_target, pos_labels)\n\n        return loss_mask_point\n\n    def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,\n                                      img_metas):\n        \"\"\"Sample fine grained feats from each level feature map and\n        concatenate them together.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            rois (Tensor): shape (num_rois, 5).\n            rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,\n                2) that contains [0, 1] x [0, 1] normalized coordinates of the\n                most uncertain points from the [mask_height, mask_width] grid.\n            img_metas (list[dict]): Image meta info.\n\n        Returns:\n          
  Tensor: The fine grained features for each points,\n                has shape (num_rois, feats_channels, num_points).\n        \"\"\"\n        num_imgs = len(img_metas)\n        fine_grained_feats = []\n        for idx in range(self.mask_roi_extractor.num_inputs):\n            feats = x[idx]\n            spatial_scale = 1. / float(\n                self.mask_roi_extractor.featmap_strides[idx])\n            point_feats = []\n            for batch_ind in range(num_imgs):\n                # unravel batch dim\n                feat = feats[batch_ind].unsqueeze(0)\n                inds = (rois[:, 0].long() == batch_ind)\n                if inds.any():\n                    rel_img_points = rel_roi_point_to_rel_img_point(\n                        rois[inds], rel_roi_points[inds], feat.shape[2:],\n                        spatial_scale).unsqueeze(0)\n                    point_feat = point_sample(feat, rel_img_points)\n                    point_feat = point_feat.squeeze(0).transpose(0, 1)\n                    point_feats.append(point_feat)\n            fine_grained_feats.append(torch.cat(point_feats, dim=0))\n        return torch.cat(fine_grained_feats, dim=1)\n\n    def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,\n                                 img_metas):\n        \"\"\"Mask refining process with point head in testing.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            rois (Tensor): shape (num_rois, 5).\n            label_pred (Tensor): The predication class for each rois.\n            mask_pred (Tensor): The predication coarse masks of\n                shape (num_rois, num_classes, small_size, small_size).\n            img_metas (list[dict]): Image meta info.\n\n        Returns:\n            Tensor: The refined masks of shape (num_rois, num_classes,\n                large_size, large_size).\n        \"\"\"\n        refined_mask_pred = mask_pred.clone()\n        for subdivision_step in range(self.test_cfg.subdivision_steps):\n            refined_mask_pred = F.interpolate(\n                refined_mask_pred,\n                scale_factor=self.test_cfg.scale_factor,\n                mode='bilinear',\n                align_corners=False)\n            # If `subdivision_num_points` is larger or equal to the\n            # resolution of the next step, then we can skip this step\n            num_rois, channels, mask_height, mask_width = \\\n                refined_mask_pred.shape\n            if (self.test_cfg.subdivision_num_points >=\n                    self.test_cfg.scale_factor**2 * mask_height * mask_width\n                    and\n                    subdivision_step < self.test_cfg.subdivision_steps - 1):\n                continue\n            point_indices, rel_roi_points = \\\n                self.point_head.get_roi_rel_points_test(\n                    refined_mask_pred, label_pred, cfg=self.test_cfg)\n            fine_grained_point_feats = self._get_fine_grained_point_feats(\n                x, rois, rel_roi_points, img_metas)\n            coarse_point_feats = point_sample(mask_pred, rel_roi_points)\n            mask_point_pred = self.point_head(fine_grained_point_feats,\n                                              coarse_point_feats)\n\n            point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)\n            refined_mask_pred = refined_mask_pred.reshape(\n                num_rois, channels, mask_height * mask_width)\n            refined_mask_pred = refined_mask_pred.scatter_(\n                2, 
point_indices, mask_point_pred)\n            refined_mask_pred = refined_mask_pred.view(num_rois, channels,\n                                                       mask_height, mask_width)\n\n        return refined_mask_pred\n\n    def simple_test_mask(self,\n                         x,\n                         img_metas,\n                         det_bboxes,\n                         det_labels,\n                         rescale=False):\n        \"\"\"Obtain mask prediction without augmentation.\"\"\"\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        if isinstance(scale_factors[0], float):\n            warnings.warn(\n                'Scale factor in img_metas should be a '\n                'ndarray with shape (4,) '\n                'arrange as (factor_w, factor_h, factor_w, factor_h), '\n                'The scale_factor with float type has been deprecated. ')\n            scale_factors = np.array([scale_factors] * 4, dtype=np.float32)\n\n        num_imgs = len(det_bboxes)\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n                            for _ in range(num_imgs)]\n        else:\n            # if det_bboxes is rescaled to the original image size, we need to\n            # rescale it back to the testing scale to obtain RoIs.\n            _bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]\n            if rescale:\n                scale_factors = [\n                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                    for scale_factor in scale_factors\n                ]\n                _bboxes = [\n                    _bboxes[i] * scale_factors[i] for i in range(len(_bboxes))\n                ]\n\n            mask_rois = bbox2roi(_bboxes)\n            mask_results = self._mask_forward(x, mask_rois)\n            # split batch mask prediction back to each image\n            mask_pred = mask_results['mask_pred']\n            num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]\n            mask_preds = mask_pred.split(num_mask_roi_per_img, 0)\n            mask_rois = mask_rois.split(num_mask_roi_per_img, 0)\n\n            # apply mask post-processing to each image individually\n            segm_results = []\n            for i in range(num_imgs):\n                if det_bboxes[i].shape[0] == 0:\n                    segm_results.append(\n                        [[] for _ in range(self.mask_head.num_classes)])\n                else:\n                    x_i = [xx[[i]] for xx in x]\n                    mask_rois_i = mask_rois[i]\n                    mask_rois_i[:, 0] = 0  # TODO: remove this hack\n                    mask_pred_i = self._mask_point_forward_test(\n                        x_i, mask_rois_i, det_labels[i], mask_preds[i],\n                        [img_metas])\n                    segm_result = self.mask_head.get_seg_masks(\n                        mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,\n                        ori_shapes[i], scale_factors[i], rescale)\n                    segm_results.append(segm_result)\n        return segm_results\n\n    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n        \"\"\"Test for mask head with test time augmentation.\"\"\"\n        if det_bboxes.shape[0] == 0:\n            segm_result = [[] for _ in range(self.mask_head.num_classes)]\n        else:\n        
    aug_masks = []\n            for x, img_meta in zip(feats, img_metas):\n                img_shape = img_meta[0]['img_shape']\n                scale_factor = img_meta[0]['scale_factor']\n                flip = img_meta[0]['flip']\n                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                       scale_factor, flip)\n                mask_rois = bbox2roi([_bboxes])\n                mask_results = self._mask_forward(x, mask_rois)\n                mask_results['mask_pred'] = self._mask_point_forward_test(\n                    x, mask_rois, det_labels, mask_results['mask_pred'],\n                    img_meta)\n                # convert to numpy array to save memory\n                aug_masks.append(\n                    mask_results['mask_pred'].sigmoid().cpu().numpy())\n            merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n            ori_shape = img_metas[0][0]['ori_shape']\n            segm_result = self.mask_head.get_seg_masks(\n                merged_masks,\n                det_bboxes,\n                det_labels,\n                self.test_cfg,\n                ori_shape,\n                scale_factor=1.0,\n                rescale=False)\n        return segm_result\n\n    def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points):\n        \"\"\"Export the process of sampling fine grained feats to onnx.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            rois (Tensor): shape (num_rois, 5).\n            rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,\n                2) that contains [0, 1] x [0, 1] normalized coordinates of the\n                most uncertain points from the [mask_height, mask_width] grid.\n\n        Returns:\n            Tensor: The fine grained features for each points,\n                has shape (num_rois, feats_channels, num_points).\n        \"\"\"\n        batch_size = x[0].shape[0]\n        num_rois = rois.shape[0]\n        fine_grained_feats = []\n        for idx in range(self.mask_roi_extractor.num_inputs):\n            feats = x[idx]\n            spatial_scale = 1. 
/ float(\n                self.mask_roi_extractor.featmap_strides[idx])\n\n            rel_img_points = rel_roi_point_to_rel_img_point(\n                rois, rel_roi_points, feats, spatial_scale)\n            channels = feats.shape[1]\n            num_points = rel_img_points.shape[1]\n            rel_img_points = rel_img_points.reshape(batch_size, -1, num_points,\n                                                    2)\n            point_feats = point_sample(feats, rel_img_points)\n            point_feats = point_feats.transpose(1, 2).reshape(\n                num_rois, channels, num_points)\n            fine_grained_feats.append(point_feats)\n        return torch.cat(fine_grained_feats, dim=1)\n\n    def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred):\n        \"\"\"Export mask refining process with point head to onnx.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            rois (Tensor): shape (num_rois, 5).\n            label_pred (Tensor): The predication class for each rois.\n            mask_pred (Tensor): The predication coarse masks of\n                shape (num_rois, num_classes, small_size, small_size).\n\n        Returns:\n            Tensor: The refined masks of shape (num_rois, num_classes,\n                large_size, large_size).\n        \"\"\"\n        refined_mask_pred = mask_pred.clone()\n        for subdivision_step in range(self.test_cfg.subdivision_steps):\n            refined_mask_pred = F.interpolate(\n                refined_mask_pred,\n                scale_factor=self.test_cfg.scale_factor,\n                mode='bilinear',\n                align_corners=False)\n            # If `subdivision_num_points` is larger or equal to the\n            # resolution of the next step, then we can skip this step\n            num_rois, channels, mask_height, mask_width = \\\n                refined_mask_pred.shape\n            if (self.test_cfg.subdivision_num_points >=\n                    self.test_cfg.scale_factor**2 * mask_height * mask_width\n                    and\n                    subdivision_step < self.test_cfg.subdivision_steps - 1):\n                continue\n            point_indices, rel_roi_points = \\\n                self.point_head.get_roi_rel_points_test(\n                    refined_mask_pred, label_pred, cfg=self.test_cfg)\n            fine_grained_point_feats = self._onnx_get_fine_grained_point_feats(\n                x, rois, rel_roi_points)\n            coarse_point_feats = point_sample(mask_pred, rel_roi_points)\n            mask_point_pred = self.point_head(fine_grained_point_feats,\n                                              coarse_point_feats)\n\n            point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)\n            refined_mask_pred = refined_mask_pred.reshape(\n                num_rois, channels, mask_height * mask_width)\n\n            is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'\n            # avoid ScatterElements op in ONNX for TensorRT\n            if is_trt_backend:\n                mask_shape = refined_mask_pred.shape\n                point_shape = point_indices.shape\n                inds_dim0 = torch.arange(point_shape[0]).reshape(\n                    point_shape[0], 1, 1).expand_as(point_indices)\n                inds_dim1 = torch.arange(point_shape[1]).reshape(\n                    1, point_shape[1], 1).expand_as(point_indices)\n                inds_1d = inds_dim0.reshape(\n                    -1) * mask_shape[1] * mask_shape[2] + 
inds_dim1.reshape(\n                        -1) * mask_shape[2] + point_indices.reshape(-1)\n                refined_mask_pred = refined_mask_pred.reshape(-1)\n                refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1)\n                refined_mask_pred = refined_mask_pred.reshape(*mask_shape)\n            else:\n                refined_mask_pred = refined_mask_pred.scatter_(\n                    2, point_indices, mask_point_pred)\n\n            refined_mask_pred = refined_mask_pred.view(num_rois, channels,\n                                                       mask_height, mask_width)\n\n        return refined_mask_pred\n\n    def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n        \"\"\"Export mask branch to onnx which supports batch inference.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            img_metas (list[dict]): Image meta info.\n            det_bboxes (Tensor): Bboxes and corresponding scores.\n                has shape [N, num_bboxes, 5].\n            det_labels (Tensor): class labels of\n                shape [N, num_bboxes].\n\n        Returns:\n            Tensor: The segmentation results of shape [N, num_bboxes,\n                image_height, image_width].\n        \"\"\"\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            raise RuntimeError('[ONNX Error] Can not record MaskHead '\n                               'as it has not been executed this time')\n        batch_size = det_bboxes.size(0)\n        # if det_bboxes is rescaled to the original image size, we need to\n        # rescale it back to the testing scale to obtain RoIs.\n        det_bboxes = det_bboxes[..., :4]\n        batch_index = torch.arange(\n            det_bboxes.size(0), device=det_bboxes.device).float().view(\n                -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n        mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n        mask_rois = mask_rois.view(-1, 5)\n        mask_results = self._mask_forward(x, mask_rois)\n        mask_pred = mask_results['mask_pred']\n        max_shape = img_metas[0]['img_shape_for_onnx']\n        num_det = det_bboxes.shape[1]\n        det_bboxes = det_bboxes.reshape(-1, 4)\n        det_labels = det_labels.reshape(-1)\n\n        mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels,\n                                                 mask_pred)\n\n        segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n                                                  det_labels, self.test_cfg,\n                                                  max_shape)\n        segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n                                            max_shape[1])\n        return segm_results\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_roi_extractor import BaseRoIExtractor\nfrom .generic_roi_extractor import GenericRoIExtractor\nfrom .single_level_roi_extractor import SingleRoIExtractor\n\n__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nimport torch.nn as nn\nfrom mmcv import ops\nfrom mmcv.runner import BaseModule\n\n\nclass BaseRoIExtractor(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for RoI extractor.\n\n    Args:\n        roi_layer (dict): Specify RoI layer type and arguments.\n        out_channels (int): Output channels of RoI layers.\n        featmap_strides (int): Strides of input feature maps.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 roi_layer,\n                 out_channels,\n                 featmap_strides,\n                 init_cfg=None):\n        super(BaseRoIExtractor, self).__init__(init_cfg)\n        self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)\n        self.out_channels = out_channels\n        self.featmap_strides = featmap_strides\n        self.fp16_enabled = False\n\n    @property\n    def num_inputs(self):\n        \"\"\"int: Number of input feature maps.\"\"\"\n        return len(self.featmap_strides)\n\n    def build_roi_layers(self, layer_cfg, featmap_strides):\n        \"\"\"Build RoI operator to extract feature from each level feature map.\n\n        Args:\n            layer_cfg (dict): Dictionary to construct and config RoI layer\n                operation. Options are modules under ``mmcv/ops`` such as\n                ``RoIAlign``.\n            featmap_strides (List[int]): The stride of input feature map w.r.t\n                to the original image size, which would be used to scale RoI\n                coordinate (original image coordinate system) to feature\n                coordinate system.\n\n        Returns:\n            nn.ModuleList: The RoI extractor modules for each level feature\n                map.\n        \"\"\"\n\n        cfg = layer_cfg.copy()\n        layer_type = cfg.pop('type')\n        assert hasattr(ops, layer_type)\n        layer_cls = getattr(ops, layer_type)\n        roi_layers = nn.ModuleList(\n            [layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])\n        return roi_layers\n\n    def roi_rescale(self, rois, scale_factor):\n        \"\"\"Scale RoI coordinates by scale factor.\n\n        Args:\n            rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)\n            scale_factor (float): Scale factor that RoI will be multiplied by.\n\n        Returns:\n            torch.Tensor: Scaled RoI.\n        \"\"\"\n\n        cx = (rois[:, 1] + rois[:, 3]) * 0.5\n        cy = (rois[:, 2] + rois[:, 4]) * 0.5\n        w = rois[:, 3] - rois[:, 1]\n        h = rois[:, 4] - rois[:, 2]\n        new_w = w * scale_factor\n        new_h = h * scale_factor\n        x1 = cx - new_w * 0.5\n        x2 = cx + new_w * 0.5\n        y1 = cy - new_h * 0.5\n        y2 = cy + new_h * 0.5\n        new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)\n        return new_rois\n\n    @abstractmethod\n    def forward(self, feats, rois, roi_scale_factor=None):\n        pass\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn.bricks import build_plugin_layer\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.models.builder import ROI_EXTRACTORS\nfrom .base_roi_extractor import BaseRoIExtractor\n\n\n@ROI_EXTRACTORS.register_module()\nclass GenericRoIExtractor(BaseRoIExtractor):\n    \"\"\"Extract RoI features from all level feature maps levels.\n\n    This is the implementation of `A novel Region of Interest Extraction Layer\n    for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.\n\n    Args:\n        aggregation (str): The method to aggregate multiple feature maps.\n            Options are 'sum', 'concat'. Default: 'sum'.\n        pre_cfg (dict | None): Specify pre-processing modules. Default: None.\n        post_cfg (dict | None): Specify post-processing modules. Default: None.\n        kwargs (keyword arguments): Arguments that are the same\n            as :class:`BaseRoIExtractor`.\n    \"\"\"\n\n    def __init__(self,\n                 aggregation='sum',\n                 pre_cfg=None,\n                 post_cfg=None,\n                 **kwargs):\n        super(GenericRoIExtractor, self).__init__(**kwargs)\n\n        assert aggregation in ['sum', 'concat']\n\n        self.aggregation = aggregation\n        self.with_post = post_cfg is not None\n        self.with_pre = pre_cfg is not None\n        # build pre/post processing modules\n        if self.with_post:\n            self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]\n        if self.with_pre:\n            self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]\n\n    @force_fp32(apply_to=('feats', ), out_fp16=True)\n    def forward(self, feats, rois, roi_scale_factor=None):\n        \"\"\"Forward function.\"\"\"\n        if len(feats) == 1:\n            return self.roi_layers[0](feats[0], rois)\n\n        out_size = self.roi_layers[0].output_size\n        num_levels = len(feats)\n        roi_feats = feats[0].new_zeros(\n            rois.size(0), self.out_channels, *out_size)\n\n        # some times rois is an empty tensor\n        if roi_feats.shape[0] == 0:\n            return roi_feats\n\n        if roi_scale_factor is not None:\n            rois = self.roi_rescale(rois, roi_scale_factor)\n\n        # mark the starting channels for concat mode\n        start_channels = 0\n        for i in range(num_levels):\n            roi_feats_t = self.roi_layers[i](feats[i], rois)\n            end_channels = start_channels + roi_feats_t.size(1)\n            if self.with_pre:\n                # apply pre-processing to a RoI extracted from each layer\n                roi_feats_t = self.pre_module(roi_feats_t)\n            if self.aggregation == 'sum':\n                # and sum them all\n                roi_feats = roi_feats + roi_feats_t\n            else:\n                # and concat them along channel dimension\n                roi_feats[:, start_channels:end_channels] = roi_feats_t\n            # update channels starting position\n            start_channels = end_channels\n        # check if concat channels match at the end\n        if self.aggregation == 'concat':\n            assert start_channels == self.out_channels\n\n        if self.with_post:\n            # apply post-processing before return the result\n            roi_feats = self.post_module(roi_feats)\n        return roi_feats\n"
  },
  {
    "path": "mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.models.builder import ROI_EXTRACTORS\nfrom .base_roi_extractor import BaseRoIExtractor\n\n\n@ROI_EXTRACTORS.register_module()\nclass SingleRoIExtractor(BaseRoIExtractor):\n    \"\"\"Extract RoI features from a single level feature map.\n\n    If there are multiple input feature levels, each RoI is mapped to a level\n    according to its scale. The mapping rule is proposed in\n    `FPN <https://arxiv.org/abs/1612.03144>`_.\n\n    Args:\n        roi_layer (dict): Specify RoI layer type and arguments.\n        out_channels (int): Output channels of RoI layers.\n        featmap_strides (List[int]): Strides of input feature maps.\n        finest_scale (int): Scale threshold of mapping to level 0. Default: 56.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 roi_layer,\n                 out_channels,\n                 featmap_strides,\n                 finest_scale=56,\n                 init_cfg=None):\n        super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,\n                                                 featmap_strides, init_cfg)\n        self.finest_scale = finest_scale\n\n    def map_roi_levels(self, rois, num_levels):\n        \"\"\"Map rois to corresponding feature levels by scales.\n\n        - scale < finest_scale * 2: level 0\n        - finest_scale * 2 <= scale < finest_scale * 4: level 1\n        - finest_scale * 4 <= scale < finest_scale * 8: level 2\n        - scale >= finest_scale * 8: level 3\n\n        Args:\n            rois (Tensor): Input RoIs, shape (k, 5).\n            num_levels (int): Total level number.\n\n        Returns:\n            Tensor: Level index (0-based) of each RoI, shape (k, )\n        \"\"\"\n        scale = torch.sqrt(\n            (rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))\n        target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))\n        target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()\n        return target_lvls\n\n    @force_fp32(apply_to=('feats', ), out_fp16=True)\n    def forward(self, feats, rois, roi_scale_factor=None):\n        \"\"\"Forward function.\"\"\"\n        out_size = self.roi_layers[0].output_size\n        num_levels = len(feats)\n        expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])\n        if torch.onnx.is_in_onnx_export():\n            # Work around to export mask-rcnn to onnx\n            roi_feats = rois[:, :1].clone().detach()\n            roi_feats = roi_feats.expand(*expand_dims)\n            roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)\n            roi_feats = roi_feats * 0\n        else:\n            roi_feats = feats[0].new_zeros(\n                rois.size(0), self.out_channels, *out_size)\n\n        if num_levels == 1:\n            if len(rois) == 0:\n                return roi_feats\n            return self.roi_layers[0](feats[0], rois)\n\n        target_lvls = self.map_roi_levels(rois, num_levels)\n\n        if roi_scale_factor is not None:\n            rois = self.roi_rescale(rois, roi_scale_factor)\n\n        for i in range(num_levels):\n            mask = target_lvls == i\n            if torch.onnx.is_in_onnx_export():\n                # To keep all roi_align nodes exported to onnx\n                # and skip nonzero op\n                mask = mask.float().unsqueeze(-1)\n    
             # select the RoIs of the target level and zero out the rest.\n                rois_i = rois.clone().detach()\n                rois_i = rois_i * mask\n                mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)\n                roi_feats_t = self.roi_layers[i](feats[i], rois_i)\n                roi_feats_t = roi_feats_t * mask_exp\n                roi_feats = roi_feats + roi_feats_t\n                continue\n            inds = mask.nonzero(as_tuple=False).squeeze(1)\n            if inds.numel() > 0:\n                rois_ = rois[inds]\n                roi_feats_t = self.roi_layers[i](feats[i], rois_)\n                roi_feats[inds] = roi_feats_t\n            else:\n                # Sometimes a pyramid level receives no RoIs. That leaves this\n                # GPU with an incomplete computation graph, different from the\n                # graphs on other GPUs, and distributed training would hang.\n                # Adding a zero-valued term over all parameters and the unused\n                # feature map keeps every pyramid level in the graph.\n                roi_feats = roi_feats + sum(\n                    x.view(-1)[0]\n                    for x in self.parameters()) * 0. + feats[i].sum() * 0.\n        return roi_feats\n"
  },
  {
    "path": "mmdet/models/roi_heads/scnet_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,\n                        merge_aug_masks, multiclass_nms)\nfrom ..builder import HEADS, build_head, build_roi_extractor\nfrom ..utils.brick_wrappers import adaptive_avg_pool2d\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@HEADS.register_module()\nclass SCNetRoIHead(CascadeRoIHead):\n    \"\"\"RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    Args:\n        num_stages (int): number of cascade stages.\n        stage_loss_weights (list): loss weight of cascade stages.\n        semantic_roi_extractor (dict): config to init semantic roi extractor.\n        semantic_head (dict): config to init semantic head.\n        feat_relay_head (dict): config to init feature_relay_head.\n        glbctx_head (dict): config to init global context head.\n    \"\"\"\n\n    def __init__(self,\n                 num_stages,\n                 stage_loss_weights,\n                 semantic_roi_extractor=None,\n                 semantic_head=None,\n                 feat_relay_head=None,\n                 glbctx_head=None,\n                 **kwargs):\n        super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,\n                                           **kwargs)\n        assert self.with_bbox and self.with_mask\n        assert not self.with_shared_head  # shared head is not supported\n\n        if semantic_head is not None:\n            self.semantic_roi_extractor = build_roi_extractor(\n                semantic_roi_extractor)\n            self.semantic_head = build_head(semantic_head)\n\n        if feat_relay_head is not None:\n            self.feat_relay_head = build_head(feat_relay_head)\n\n        if glbctx_head is not None:\n            self.glbctx_head = build_head(glbctx_head)\n\n    def init_mask_head(self, mask_roi_extractor, mask_head):\n        \"\"\"Initialize ``mask_head``\"\"\"\n        if mask_roi_extractor is not None:\n            self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n            self.mask_head = build_head(mask_head)\n\n    @property\n    def with_semantic(self):\n        \"\"\"bool: whether the head has semantic head\"\"\"\n        return hasattr(self,\n                       'semantic_head') and self.semantic_head is not None\n\n    @property\n    def with_feat_relay(self):\n        \"\"\"bool: whether the head has feature relay head\"\"\"\n        return (hasattr(self, 'feat_relay_head')\n                and self.feat_relay_head is not None)\n\n    @property\n    def with_glbctx(self):\n        \"\"\"bool: whether the head has global context head\"\"\"\n        return hasattr(self, 'glbctx_head') and self.glbctx_head is not None\n\n    def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):\n        \"\"\"Fuse global context feats with roi feats.\"\"\"\n        assert roi_feats.size(0) == rois.size(0)\n        img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()\n        fused_feats = torch.zeros_like(roi_feats)\n        for img_id in img_inds:\n            inds = (rois[:, 0] == img_id.item())\n            fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]\n        return fused_feats\n\n    def _slice_pos_feats(self, feats, sampling_results):\n        \"\"\"Get features from pos rois.\"\"\"\n        num_rois = [res.bboxes.size(0) for res in sampling_results]\n        num_pos_rois = [res.pos_bboxes.size(0) for 
res in sampling_results]\n        inds = torch.zeros(sum(num_rois), dtype=torch.bool)\n        start = 0\n        for i in range(len(num_rois)):\n            start = 0 if i == 0 else start + num_rois[i - 1]\n            stop = start + num_pos_rois[i]\n            inds[start:stop] = 1\n        sliced_feats = feats[inds]\n        return sliced_feats\n\n    def _bbox_forward(self,\n                      stage,\n                      x,\n                      rois,\n                      semantic_feat=None,\n                      glbctx_feat=None):\n        \"\"\"Box head forward function used in both training and testing.\"\"\"\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(\n            x[:len(bbox_roi_extractor.featmap_strides)], rois)\n        if self.with_semantic and semantic_feat is not None:\n            bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:\n                bbox_semantic_feat = adaptive_avg_pool2d(\n                    bbox_semantic_feat, bbox_feats.shape[-2:])\n            bbox_feats = bbox_feats + bbox_semantic_feat\n        if self.with_glbctx and glbctx_feat is not None:\n            bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)\n        cls_score, bbox_pred, relayed_feat = bbox_head(\n            bbox_feats, return_shared_feat=True)\n\n        bbox_results = dict(\n            cls_score=cls_score,\n            bbox_pred=bbox_pred,\n            relayed_feat=relayed_feat)\n        return bbox_results\n\n    def _mask_forward(self,\n                      x,\n                      rois,\n                      semantic_feat=None,\n                      glbctx_feat=None,\n                      relayed_feat=None):\n        \"\"\"Mask head forward function used in both training and testing.\"\"\"\n        mask_feats = self.mask_roi_extractor(\n            x[:self.mask_roi_extractor.num_inputs], rois)\n        if self.with_semantic and semantic_feat is not None:\n            mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],\n                                                             rois)\n            if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:\n                mask_semantic_feat = F.adaptive_avg_pool2d(\n                    mask_semantic_feat, mask_feats.shape[-2:])\n            mask_feats = mask_feats + mask_semantic_feat\n        if self.with_glbctx and glbctx_feat is not None:\n            mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)\n        if self.with_feat_relay and relayed_feat is not None:\n            mask_feats = mask_feats + relayed_feat\n        mask_pred = self.mask_head(mask_feats)\n        mask_results = dict(mask_pred=mask_pred)\n\n        return mask_results\n\n    def _bbox_forward_train(self,\n                            stage,\n                            x,\n                            sampling_results,\n                            gt_bboxes,\n                            gt_labels,\n                            rcnn_train_cfg,\n                            semantic_feat=None,\n                            glbctx_feat=None):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        bbox_head = self.bbox_head[stage]\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n        bbox_results = 
self._bbox_forward(\n            stage,\n            x,\n            rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat)\n\n        bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,\n                                             gt_labels, rcnn_train_cfg)\n        loss_bbox = bbox_head.loss(bbox_results['cls_score'],\n                                   bbox_results['bbox_pred'], rois,\n                                   *bbox_targets)\n\n        bbox_results.update(\n            loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n        return bbox_results\n\n    def _mask_forward_train(self,\n                            x,\n                            sampling_results,\n                            gt_masks,\n                            rcnn_train_cfg,\n                            semantic_feat=None,\n                            glbctx_feat=None,\n                            relayed_feat=None):\n        \"\"\"Run forward function and calculate loss for mask head in\n        training.\"\"\"\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n        mask_results = self._mask_forward(\n            x,\n            pos_rois,\n            semantic_feat=semantic_feat,\n            glbctx_feat=glbctx_feat,\n            relayed_feat=relayed_feat)\n\n        mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n                                                  rcnn_train_cfg)\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n                                        mask_targets, pos_labels)\n\n        mask_results = loss_mask\n        return mask_results\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      gt_semantic_seg=None):\n        \"\"\"\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            proposal_list (list[Tensors]): list of region proposals.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None, list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (None, Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks\n                used if the architecture supports semantic segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components\n        \"\"\"\n        losses = dict()\n\n        # semantic segmentation branch\n        if self.with_semantic:\n            semantic_pred, semantic_feat = 
self.semantic_head(x)\n            loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)\n            losses['loss_semantic_seg'] = loss_seg\n        else:\n            semantic_feat = None\n\n        # global context branch\n        if self.with_glbctx:\n            mc_pred, glbctx_feat = self.glbctx_head(x)\n            loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)\n            losses['loss_glbctx'] = loss_glbctx\n        else:\n            glbctx_feat = None\n\n        for i in range(self.num_stages):\n            self.current_stage = i\n            rcnn_train_cfg = self.train_cfg[i]\n            lw = self.stage_loss_weights[i]\n\n            # assign gts and sample proposals\n            sampling_results = []\n            bbox_assigner = self.bbox_assigner[i]\n            bbox_sampler = self.bbox_sampler[i]\n            num_imgs = len(img_metas)\n            if gt_bboxes_ignore is None:\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n            for j in range(num_imgs):\n                assign_result = bbox_assigner.assign(proposal_list[j],\n                                                     gt_bboxes[j],\n                                                     gt_bboxes_ignore[j],\n                                                     gt_labels[j])\n                sampling_result = bbox_sampler.sample(\n                    assign_result,\n                    proposal_list[j],\n                    gt_bboxes[j],\n                    gt_labels[j],\n                    feats=[lvl_feat[j][None] for lvl_feat in x])\n                sampling_results.append(sampling_result)\n\n            bbox_results = \\\n                self._bbox_forward_train(\n                    i, x, sampling_results, gt_bboxes, gt_labels,\n                    rcnn_train_cfg, semantic_feat, glbctx_feat)\n            roi_labels = bbox_results['bbox_targets'][0]\n\n            for name, value in bbox_results['loss_bbox'].items():\n                losses[f's{i}.{name}'] = (\n                    value * lw if 'loss' in name else value)\n\n            # refine boxes\n            if i < self.num_stages - 1:\n                pos_is_gts = [res.pos_is_gt for res in sampling_results]\n                with torch.no_grad():\n                    proposal_list = self.bbox_head[i].refine_bboxes(\n                        bbox_results['rois'], roi_labels,\n                        bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n        if self.with_feat_relay:\n            relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],\n                                                 sampling_results)\n            relayed_feat = self.feat_relay_head(relayed_feat)\n        else:\n            relayed_feat = None\n\n        mask_results = self._mask_forward_train(x, sampling_results, gt_masks,\n                                                rcnn_train_cfg, semantic_feat,\n                                                glbctx_feat, relayed_feat)\n        mask_lw = sum(self.stage_loss_weights)\n        losses['loss_mask'] = mask_lw * mask_results['loss_mask']\n\n        return losses\n\n    def simple_test(self, x, proposal_list, img_metas, rescale=False):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. 
Each\n                has shape (batch_size, c, h, w).\n            proposal_list (list(Tensor)): Proposals from rpn head.\n                Each has shape (num_proposals, 5), last dimension\n                5 represent (x1, y1, x2, y2, score).\n            img_metas (list[dict]): Meta information of images.\n            rescale (bool): Whether to rescale the results to\n                the original image. Default: True.\n\n        Returns:\n            list[list[np.ndarray]] or list[tuple]: When no mask branch,\n            it is bbox results of each image and classes with type\n            `list[list[np.ndarray]]`. The outer list\n            corresponds to each image. The inner list\n            corresponds to each class. When the model has mask branch,\n            it contains bbox results and mask results.\n            The outer list corresponds to each image, and first element\n            of tuple is bbox results, second element is mask results.\n        \"\"\"\n        if self.with_semantic:\n            _, semantic_feat = self.semantic_head(x)\n        else:\n            semantic_feat = None\n\n        if self.with_glbctx:\n            mc_pred, glbctx_feat = self.glbctx_head(x)\n        else:\n            glbctx_feat = None\n\n        num_imgs = len(proposal_list)\n        img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        # \"ms\" in variable names means multi-stage\n        ms_scores = []\n        rcnn_test_cfg = self.test_cfg\n\n        rois = bbox2roi(proposal_list)\n\n        if rois.shape[0] == 0:\n            # There is no proposal in the whole batch\n            bbox_results = [[\n                np.zeros((0, 5), dtype=np.float32)\n                for _ in range(self.bbox_head[-1].num_classes)\n            ]] * num_imgs\n\n            if self.with_mask:\n                mask_classes = self.mask_head.num_classes\n                segm_results = [[[] for _ in range(mask_classes)]\n                                for _ in range(num_imgs)]\n                results = list(zip(bbox_results, segm_results))\n            else:\n                results = bbox_results\n\n            return results\n\n        for i in range(self.num_stages):\n            bbox_head = self.bbox_head[i]\n            bbox_results = self._bbox_forward(\n                i,\n                x,\n                rois,\n                semantic_feat=semantic_feat,\n                glbctx_feat=glbctx_feat)\n            # split batch bbox prediction back to each image\n            cls_score = bbox_results['cls_score']\n            bbox_pred = bbox_results['bbox_pred']\n            num_proposals_per_img = tuple(len(p) for p in proposal_list)\n            rois = rois.split(num_proposals_per_img, 0)\n            cls_score = cls_score.split(num_proposals_per_img, 0)\n            bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n            ms_scores.append(cls_score)\n\n            if i < self.num_stages - 1:\n                refine_rois_list = []\n                for j in range(num_imgs):\n                    if rois[j].shape[0] > 0:\n                        bbox_label = cls_score[j][:, :-1].argmax(dim=1)\n                        refine_rois = bbox_head.regress_by_class(\n                            rois[j], bbox_label, bbox_pred[j], img_metas[j])\n                        refine_rois_list.append(refine_rois)\n                rois = 
torch.cat(refine_rois_list)\n\n        # average scores of each image by stages\n        cls_score = [\n            sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n            for i in range(num_imgs)\n        ]\n\n        # apply bbox post-processing to each image individually\n        det_bboxes = []\n        det_labels = []\n        for i in range(num_imgs):\n            det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n                rois[i],\n                cls_score[i],\n                bbox_pred[i],\n                img_shapes[i],\n                scale_factors[i],\n                rescale=rescale,\n                cfg=rcnn_test_cfg)\n            det_bboxes.append(det_bbox)\n            det_labels.append(det_label)\n        det_bbox_results = [\n            bbox2result(det_bboxes[i], det_labels[i],\n                        self.bbox_head[-1].num_classes)\n            for i in range(num_imgs)\n        ]\n\n        if self.with_mask:\n            if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n                mask_classes = self.mask_head.num_classes\n                det_segm_results = [[[] for _ in range(mask_classes)]\n                                    for _ in range(num_imgs)]\n            else:\n                if rescale and not isinstance(scale_factors[0], float):\n                    scale_factors = [\n                        torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                        for scale_factor in scale_factors\n                    ]\n                _bboxes = [\n                    det_bboxes[i][:, :4] *\n                    scale_factors[i] if rescale else det_bboxes[i]\n                    for i in range(num_imgs)\n                ]\n                mask_rois = bbox2roi(_bboxes)\n\n                # get relay feature on mask_rois\n                bbox_results = self._bbox_forward(\n                    -1,\n                    x,\n                    mask_rois,\n                    semantic_feat=semantic_feat,\n                    glbctx_feat=glbctx_feat)\n                relayed_feat = bbox_results['relayed_feat']\n                relayed_feat = self.feat_relay_head(relayed_feat)\n\n                mask_results = self._mask_forward(\n                    x,\n                    mask_rois,\n                    semantic_feat=semantic_feat,\n                    glbctx_feat=glbctx_feat,\n                    relayed_feat=relayed_feat)\n                mask_pred = mask_results['mask_pred']\n\n                # split batch mask prediction back to each image\n                num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)\n                mask_preds = mask_pred.split(num_bbox_per_img, 0)\n\n                # apply mask post-processing to each image individually\n                det_segm_results = []\n                for i in range(num_imgs):\n                    if det_bboxes[i].shape[0] == 0:\n                        det_segm_results.append(\n                            [[] for _ in range(self.mask_head.num_classes)])\n                    else:\n                        segm_result = self.mask_head.get_seg_masks(\n                            mask_preds[i], _bboxes[i], det_labels[i],\n                            self.test_cfg, ori_shapes[i], scale_factors[i],\n                            rescale)\n                        det_segm_results.append(segm_result)\n\n        # return results\n        if self.with_mask:\n            return list(zip(det_bbox_results, det_segm_results))\n        else:\n            return 
det_bbox_results\n\n    def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):\n        if self.with_semantic:\n            semantic_feats = [\n                self.semantic_head(feat)[1] for feat in img_feats\n            ]\n        else:\n            semantic_feats = [None] * len(img_metas)\n\n        if self.with_glbctx:\n            glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]\n        else:\n            glbctx_feats = [None] * len(img_metas)\n\n        rcnn_test_cfg = self.test_cfg\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta, semantic_feat, glbctx_feat in zip(\n                img_feats, img_metas, semantic_feats, glbctx_feats):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n            scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n\n            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n                                     scale_factor, flip)\n            # \"ms\" in variable names means multi-stage\n            ms_scores = []\n\n            rois = bbox2roi([proposals])\n\n            if rois.shape[0] == 0:\n                # There is no proposal in the single image\n                aug_bboxes.append(rois.new_zeros(0, 4))\n                aug_scores.append(rois.new_zeros(0, 1))\n                continue\n\n            for i in range(self.num_stages):\n                bbox_head = self.bbox_head[i]\n                bbox_results = self._bbox_forward(\n                    i,\n                    x,\n                    rois,\n                    semantic_feat=semantic_feat,\n                    glbctx_feat=glbctx_feat)\n                ms_scores.append(bbox_results['cls_score'])\n                if i < self.num_stages - 1:\n                    bbox_label = bbox_results['cls_score'].argmax(dim=1)\n                    rois = bbox_head.regress_by_class(\n                        rois, bbox_label, bbox_results['bbox_pred'],\n                        img_meta[0])\n\n            cls_score = sum(ms_scores) / float(len(ms_scores))\n            bboxes, scores = self.bbox_head[-1].get_bboxes(\n                rois,\n                cls_score,\n                bbox_results['bbox_pred'],\n                img_shape,\n                scale_factor,\n                rescale=False,\n                cfg=None)\n            aug_bboxes.append(bboxes)\n            aug_scores.append(scores)\n\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n                                                rcnn_test_cfg.score_thr,\n                                                rcnn_test_cfg.nms,\n                                                rcnn_test_cfg.max_per_img)\n\n        det_bbox_results = bbox2result(det_bboxes, det_labels,\n                                       self.bbox_head[-1].num_classes)\n\n        if self.with_mask:\n            if det_bboxes.shape[0] == 0:\n                det_segm_results = [[]\n                                    for _ in range(self.mask_head.num_classes)]\n            else:\n                aug_masks = []\n                for x, img_meta, semantic_feat, glbctx_feat in zip(\n                        img_feats, img_metas, semantic_feats, glbctx_feats):\n                    img_shape = 
img_meta[0]['img_shape']\n                    scale_factor = img_meta[0]['scale_factor']\n                    flip = img_meta[0]['flip']\n                    _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                           scale_factor, flip)\n                    mask_rois = bbox2roi([_bboxes])\n                    # get relay feature on mask_rois\n                    bbox_results = self._bbox_forward(\n                        -1,\n                        x,\n                        mask_rois,\n                        semantic_feat=semantic_feat,\n                        glbctx_feat=glbctx_feat)\n                    relayed_feat = bbox_results['relayed_feat']\n                    relayed_feat = self.feat_relay_head(relayed_feat)\n                    mask_results = self._mask_forward(\n                        x,\n                        mask_rois,\n                        semantic_feat=semantic_feat,\n                        glbctx_feat=glbctx_feat,\n                        relayed_feat=relayed_feat)\n                    mask_pred = mask_results['mask_pred']\n                    aug_masks.append(mask_pred.sigmoid().cpu().numpy())\n                merged_masks = merge_aug_masks(aug_masks, img_metas,\n                                               self.test_cfg)\n                ori_shape = img_metas[0][0]['ori_shape']\n                det_segm_results = self.mask_head.get_seg_masks(\n                    merged_masks,\n                    det_bboxes,\n                    det_labels,\n                    rcnn_test_cfg,\n                    ori_shape,\n                    scale_factor=1.0,\n                    rescale=False)\n            return [(det_bbox_results, det_segm_results)]\n        else:\n            return [det_bbox_results]\n"
  },
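The SCNet RoI head's forward_train above keys every stage's box losses as 's{i}.<name>', scales only the entries whose name contains 'loss' by that stage's weight, and scales the single mask loss by the sum of all stage weights. A minimal standalone sketch of that bookkeeping (the numeric values below are made up for illustration, not taken from the repository):

stage_loss_weights = [1, 0.5, 0.25]
losses = {}
for i, lw in enumerate(stage_loss_weights):
    # dummy per-stage outputs; the real values come from the bbox head's loss()
    stage_outputs = {'loss_cls': 0.8, 'loss_bbox': 0.4, 'acc': 0.9}
    for name, value in stage_outputs.items():
        # only loss terms are down-weighted; metrics such as 'acc' pass through
        losses[f's{i}.{name}'] = value * lw if 'loss' in name else value
# the mask branch runs once, so its loss carries the summed stage weights
losses['loss_mask'] = sum(stage_loss_weights) * 0.6
print(losses['s1.loss_cls'], losses['s1.acc'], losses['loss_mask'])  # 0.4 0.9 1.05 (up to float rounding)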
  {
    "path": "mmdet/models/roi_heads/shared_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .res_layer import ResLayer\n\n__all__ = ['ResLayer']\n"
  },
  {
    "path": "mmdet/models/roi_heads/shared_heads/res_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule, auto_fp16\n\nfrom mmdet.models.backbones import ResNet\nfrom mmdet.models.builder import SHARED_HEADS\nfrom mmdet.models.utils import ResLayer as _ResLayer\n\n\n@SHARED_HEADS.register_module()\nclass ResLayer(BaseModule):\n\n    def __init__(self,\n                 depth,\n                 stage=3,\n                 stride=2,\n                 dilation=1,\n                 style='pytorch',\n                 norm_cfg=dict(type='BN', requires_grad=True),\n                 norm_eval=True,\n                 with_cp=False,\n                 dcn=None,\n                 pretrained=None,\n                 init_cfg=None):\n        super(ResLayer, self).__init__(init_cfg)\n\n        self.norm_eval = norm_eval\n        self.norm_cfg = norm_cfg\n        self.stage = stage\n        self.fp16_enabled = False\n        block, stage_blocks = ResNet.arch_settings[depth]\n        stage_block = stage_blocks[stage]\n        planes = 64 * 2**stage\n        inplanes = 64 * 2**(stage - 1) * block.expansion\n\n        res_layer = _ResLayer(\n            block,\n            inplanes,\n            planes,\n            stage_block,\n            stride=stride,\n            dilation=dilation,\n            style=style,\n            with_cp=with_cp,\n            norm_cfg=self.norm_cfg,\n            dcn=dcn)\n        self.add_module(f'layer{stage + 1}', res_layer)\n\n        assert not (init_cfg and pretrained), \\\n            'init_cfg and pretrained cannot be specified at the same time'\n        if isinstance(pretrained, str):\n            warnings.warn('DeprecationWarning: pretrained is a deprecated, '\n                          'please use \"init_cfg\" instead')\n            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n        elif pretrained is None:\n            if init_cfg is None:\n                self.init_cfg = [\n                    dict(type='Kaiming', layer='Conv2d'),\n                    dict(\n                        type='Constant',\n                        val=1,\n                        layer=['_BatchNorm', 'GroupNorm'])\n                ]\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    @auto_fp16()\n    def forward(self, x):\n        res_layer = getattr(self, f'layer{self.stage + 1}')\n        out = res_layer(x)\n        return out\n\n    def train(self, mode=True):\n        super(ResLayer, self).train(mode)\n        if self.norm_eval:\n            for m in self.modules():\n                if isinstance(m, nn.BatchNorm2d):\n                    m.eval()\n"
  },
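ResLayer above derives its channel widths from the backbone stage index: planes = 64 * 2**stage and inplanes = 64 * 2**(stage - 1) * block.expansion. A small sketch of that arithmetic, assuming a Bottleneck-style block with expansion 4 (as in ResNet-50/101); the helper name is made up for illustration:

def res_layer_channels(stage, expansion=4):
    # mirrors the arithmetic in ResLayer.__init__ above
    planes = 64 * 2 ** stage                      # base width of the requested stage
    inplanes = 64 * 2 ** (stage - 1) * expansion  # channels coming out of the previous stage
    return inplanes, planes

# the default stage=3 corresponds to a ResNet-50-style layer4: 1024 in, 512 base width
print(res_layer_channels(3))  # (1024, 512)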
  {
    "path": "mmdet/models/roi_heads/sparse_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh\nfrom mmdet.core.bbox.samplers import PseudoSampler\nfrom ..builder import HEADS\nfrom .cascade_roi_head import CascadeRoIHead\n\n\n@HEADS.register_module()\nclass SparseRoIHead(CascadeRoIHead):\n    r\"\"\"The RoIHead for `Sparse R-CNN: End-to-End Object Detection with\n    Learnable Proposals <https://arxiv.org/abs/2011.12450>`_\n    and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_\n\n    Args:\n        num_stages (int): Number of stage whole iterative process.\n            Defaults to 6.\n        stage_loss_weights (Tuple[float]): The loss\n            weight of each stage. By default all stages have\n            the same weight 1.\n        bbox_roi_extractor (dict): Config of box roi extractor.\n        mask_roi_extractor (dict): Config of mask roi extractor.\n        bbox_head (dict): Config of box head.\n        mask_head (dict): Config of mask head.\n        train_cfg (dict, optional): Configuration information in train stage.\n            Defaults to None.\n        test_cfg (dict, optional): Configuration information in test stage.\n            Defaults to None.\n        pretrained (str, optional): model pretrained path. Default: None\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    \"\"\"\n\n    def __init__(self,\n                 num_stages=6,\n                 stage_loss_weights=(1, 1, 1, 1, 1, 1),\n                 proposal_feature_channel=256,\n                 bbox_roi_extractor=dict(\n                     type='SingleRoIExtractor',\n                     roi_layer=dict(\n                         type='RoIAlign', output_size=7, sampling_ratio=2),\n                     out_channels=256,\n                     featmap_strides=[4, 8, 16, 32]),\n                 mask_roi_extractor=None,\n                 bbox_head=dict(\n                     type='DIIHead',\n                     num_classes=80,\n                     num_fcs=2,\n                     num_heads=8,\n                     num_cls_fcs=1,\n                     num_reg_fcs=3,\n                     feedforward_channels=2048,\n                     hidden_channels=256,\n                     dropout=0.0,\n                     roi_feat_size=7,\n                     ffn_act_cfg=dict(type='ReLU', inplace=True)),\n                 mask_head=None,\n                 train_cfg=None,\n                 test_cfg=None,\n                 pretrained=None,\n                 init_cfg=None):\n        assert bbox_roi_extractor is not None\n        assert bbox_head is not None\n        assert len(stage_loss_weights) == num_stages\n        self.num_stages = num_stages\n        self.stage_loss_weights = stage_loss_weights\n        self.proposal_feature_channel = proposal_feature_channel\n        super(SparseRoIHead, self).__init__(\n            num_stages,\n            stage_loss_weights,\n            bbox_roi_extractor=bbox_roi_extractor,\n            mask_roi_extractor=mask_roi_extractor,\n            bbox_head=bbox_head,\n            mask_head=mask_head,\n            train_cfg=train_cfg,\n            test_cfg=test_cfg,\n            pretrained=pretrained,\n            init_cfg=init_cfg)\n        # train_cfg would be None when run the test.py\n        if train_cfg is not None:\n            for stage in range(num_stages):\n                assert isinstance(self.bbox_sampler[stage], PseudoSampler), \\\n       
             'Sparse R-CNN and QueryInst only support `PseudoSampler`'\n\n    def _bbox_forward(self, stage, x, rois, object_feats, img_metas):\n        \"\"\"Box head forward function used in both training and testing. Returns\n        all regression, classification results and an intermediate feature.\n\n        Args:\n            stage (int): The index of the current stage in the\n                iterative process.\n            x (List[Tensor]): List of FPN features.\n            rois (Tensor): RoIs of the whole batch, with shape\n                (num_proposal, 5); the last dimension 5 represents\n                (img_index, x1, y1, x2, y2).\n            object_feats (Tensor): The object feature extracted from\n                the previous stage.\n            img_metas (dict): Meta information of images.\n\n        Returns:\n            dict[str, Tensor]: A dictionary of bbox head outputs,\n                containing the following results:\n\n                    - cls_score (Tensor): The score of each class, has\n                      shape (batch_size, num_proposals, num_classes)\n                      when using focal loss, or\n                      (batch_size, num_proposals, num_classes+1)\n                      otherwise.\n                    - decode_bbox_pred (Tensor): The regression results\n                      with shape (batch_size, num_proposal, 4).\n                      The last dimension 4 represents\n                      [tl_x, tl_y, br_x, br_y].\n                    - object_feats (Tensor): The object feature extracted\n                      from the current stage.\n                    - detach_cls_score_list (list[Tensor]): The detached\n                      classification results, length is batch_size, and\n                      each tensor has shape (num_proposal, num_classes).\n                    - detach_proposal_list (list[Tensor]): The detached\n                      regression results, length is batch_size, and each\n                      tensor has shape (num_proposal, 4). 
The last\n                      dimension 4 represents [tl_x, tl_y, br_x, br_y].\n        \"\"\"\n        num_imgs = len(img_metas)\n        bbox_roi_extractor = self.bbox_roi_extractor[stage]\n        bbox_head = self.bbox_head[stage]\n        bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n                                        rois)\n        cls_score, bbox_pred, object_feats, attn_feats = bbox_head(\n            bbox_feats, object_feats)\n        proposal_list = self.bbox_head[stage].refine_bboxes(\n            rois,\n            rois.new_zeros(len(rois)),  # dummy arg\n            bbox_pred.view(-1, bbox_pred.size(-1)),\n            [rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],\n            img_metas)\n        bbox_results = dict(\n            cls_score=cls_score,\n            decode_bbox_pred=torch.cat(proposal_list),\n            object_feats=object_feats,\n            attn_feats=attn_feats,\n            # detach then use it in label assign\n            detach_cls_score_list=[\n                cls_score[i].detach() for i in range(num_imgs)\n            ],\n            detach_proposal_list=[item.detach() for item in proposal_list])\n\n        return bbox_results\n\n    def _mask_forward(self, stage, x, rois, attn_feats):\n        \"\"\"Mask head forward function used in both training and testing.\"\"\"\n        mask_roi_extractor = self.mask_roi_extractor[stage]\n        mask_head = self.mask_head[stage]\n        mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n                                        rois)\n        # do not support caffe_c4 model anymore\n        mask_pred = mask_head(mask_feats, attn_feats)\n\n        mask_results = dict(mask_pred=mask_pred)\n        return mask_results\n\n    def _mask_forward_train(self, stage, x, attn_feats, sampling_results,\n                            gt_masks, rcnn_train_cfg):\n        \"\"\"Run forward function and calculate loss for mask head in\n        training.\"\"\"\n        pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n        attn_feats = torch.cat([\n            feats[res.pos_inds]\n            for (feats, res) in zip(attn_feats, sampling_results)\n        ])\n        mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)\n\n        mask_targets = self.mask_head[stage].get_targets(\n            sampling_results, gt_masks, rcnn_train_cfg)\n\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n\n        loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n                                               mask_targets, pos_labels)\n        mask_results.update(loss_mask)\n        return mask_results\n\n    def forward_train(self,\n                      x,\n                      proposal_boxes,\n                      proposal_features,\n                      img_metas,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      imgs_whwh=None,\n                      gt_masks=None):\n        \"\"\"Forward function in training stage.\n\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n            proposals (Tensor): Decoded proposal bboxes, has shape\n                (batch_size, num_proposals, 4)\n            proposal_features (Tensor): Expanded proposal\n                features, has shape\n                (batch_size, num_proposals, proposal_feature_channel)\n            img_metas (list[dict]): list of 
image info dict where\n                each dict has: 'img_shape', 'scale_factor', 'flip',\n                and may also contain 'filename', 'ori_shape',\n                'pad_shape', and 'img_norm_cfg'. For details on the\n                values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n            imgs_whwh (Tensor): Tensor with shape (batch_size, 4),\n                    the dimension means\n                    [img_width,img_height, img_width, img_height].\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of loss components of all stage.\n        \"\"\"\n\n        num_imgs = len(img_metas)\n        num_proposals = proposal_boxes.size(1)\n        imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)\n        all_stage_bbox_results = []\n        proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]\n        object_feats = proposal_features\n        all_stage_loss = {}\n        for stage in range(self.num_stages):\n            rois = bbox2roi(proposal_list)\n            bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n                                              img_metas)\n            all_stage_bbox_results.append(bbox_results)\n            if gt_bboxes_ignore is None:\n                # TODO support ignore\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n            sampling_results = []\n            cls_pred_list = bbox_results['detach_cls_score_list']\n            proposal_list = bbox_results['detach_proposal_list']\n            for i in range(num_imgs):\n                normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /\n                                                          imgs_whwh[i])\n                assign_result = self.bbox_assigner[stage].assign(\n                    normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],\n                    gt_labels[i], img_metas[i])\n                sampling_result = self.bbox_sampler[stage].sample(\n                    assign_result, proposal_list[i], gt_bboxes[i])\n                sampling_results.append(sampling_result)\n            bbox_targets = self.bbox_head[stage].get_targets(\n                sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],\n                True)\n            cls_score = bbox_results['cls_score']\n            decode_bbox_pred = bbox_results['decode_bbox_pred']\n\n            single_stage_loss = self.bbox_head[stage].loss(\n                cls_score.view(-1, cls_score.size(-1)),\n                decode_bbox_pred.view(-1, 4),\n                *bbox_targets,\n                imgs_whwh=imgs_whwh)\n\n            if self.with_mask:\n                mask_results = self._mask_forward_train(\n                    stage, x, bbox_results['attn_feats'], sampling_results,\n                    gt_masks, self.train_cfg[stage])\n                single_stage_loss['loss_mask'] = mask_results['loss_mask']\n\n            for key, value in single_stage_loss.items():\n                
all_stage_loss[f'stage{stage}_{key}'] = value * \\\n                                    self.stage_loss_weights[stage]\n            object_feats = bbox_results['object_feats']\n\n        return all_stage_loss\n\n    def simple_test(self,\n                    x,\n                    proposal_boxes,\n                    proposal_features,\n                    img_metas,\n                    imgs_whwh,\n                    rescale=False):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n            proposal_boxes (Tensor): Decoded proposal bboxes, has shape\n                (batch_size, num_proposals, 4)\n            proposal_features (Tensor): Expanded proposal\n                features, has shape\n                (batch_size, num_proposals, proposal_feature_channel)\n            img_metas (dict): meta information of images.\n            imgs_whwh (Tensor): Tensor with shape (batch_size, 4),\n                    the dimension means\n                    [img_width,img_height, img_width, img_height].\n            rescale (bool): If True, return boxes in original image\n                space. Defaults to False.\n\n        Returns:\n            list[list[np.ndarray]] or list[tuple]: When no mask branch,\n            it is bbox results of each image and classes with type\n            `list[list[np.ndarray]]`. The outer list\n            corresponds to each image. The inner list\n            corresponds to each class. When the model has a mask branch,\n            it is a list[tuple] that contains bbox results and mask results.\n            The outer list corresponds to each image, and first element\n            of tuple is bbox results, second element is mask results.\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        # Decode initial proposals\n        num_imgs = len(img_metas)\n        proposal_list = [proposal_boxes[i] for i in range(num_imgs)]\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        object_feats = proposal_features\n        if all([proposal.shape[0] == 0 for proposal in proposal_list]):\n            # There is no proposal in the whole batch\n            bbox_results = [[\n                np.zeros((0, 5), dtype=np.float32)\n                for i in range(self.bbox_head[-1].num_classes)\n            ]] * num_imgs\n            return bbox_results\n\n        for stage in range(self.num_stages):\n            rois = bbox2roi(proposal_list)\n            bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n                                              img_metas)\n            object_feats = bbox_results['object_feats']\n            cls_score = bbox_results['cls_score']\n            proposal_list = bbox_results['detach_proposal_list']\n\n        if self.with_mask:\n            rois = bbox2roi(proposal_list)\n            mask_results = self._mask_forward(stage, x, rois,\n                                              bbox_results['attn_feats'])\n            mask_results['mask_pred'] = mask_results['mask_pred'].reshape(\n                num_imgs, -1, *mask_results['mask_pred'].size()[1:])\n\n        num_classes = self.bbox_head[-1].num_classes\n        det_bboxes = []\n        det_labels = []\n\n        if self.bbox_head[-1].loss_cls.use_sigmoid:\n            cls_score = cls_score.sigmoid()\n        else:\n            cls_score = cls_score.softmax(-1)[..., :-1]\n\n    
    for img_id in range(num_imgs):\n            cls_score_per_img = cls_score[img_id]\n            scores_per_img, topk_indices = cls_score_per_img.flatten(\n                0, 1).topk(\n                    self.test_cfg.max_per_img, sorted=False)\n            labels_per_img = topk_indices % num_classes\n            bbox_pred_per_img = proposal_list[img_id][topk_indices //\n                                                      num_classes]\n            if rescale:\n                scale_factor = img_metas[img_id]['scale_factor']\n                bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)\n            det_bboxes.append(\n                torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))\n            det_labels.append(labels_per_img)\n\n        bbox_results = [\n            bbox2result(det_bboxes[i], det_labels[i], num_classes)\n            for i in range(num_imgs)\n        ]\n\n        if self.with_mask:\n            if rescale and not isinstance(scale_factors[0], float):\n                scale_factors = [\n                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                    for scale_factor in scale_factors\n                ]\n            _bboxes = [\n                det_bboxes[i][:, :4] *\n                scale_factors[i] if rescale else det_bboxes[i][:, :4]\n                for i in range(len(det_bboxes))\n            ]\n            segm_results = []\n            mask_pred = mask_results['mask_pred']\n            for img_id in range(num_imgs):\n                mask_pred_per_img = mask_pred[img_id].flatten(0,\n                                                              1)[topk_indices]\n                mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat(\n                    1, num_classes, 1, 1)\n                segm_result = self.mask_head[-1].get_seg_masks(\n                    mask_pred_per_img, _bboxes[img_id], det_labels[img_id],\n                    self.test_cfg, ori_shapes[img_id], scale_factors[img_id],\n                    rescale)\n                segm_results.append(segm_result)\n\n        if self.with_mask:\n            results = list(zip(bbox_results, segm_results))\n        else:\n            results = bbox_results\n\n        return results\n\n    def aug_test(self, features, proposal_list, img_metas, rescale=False):\n        raise NotImplementedError(\n            'Sparse R-CNN and QueryInst does not support `aug_test`')\n\n    def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):\n        \"\"\"Dummy forward function when do the flops computing.\"\"\"\n        all_stage_bbox_results = []\n        proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]\n        object_feats = proposal_features\n        if self.with_bbox:\n            for stage in range(self.num_stages):\n                rois = bbox2roi(proposal_list)\n                bbox_results = self._bbox_forward(stage, x, rois, object_feats,\n                                                  img_metas)\n\n                all_stage_bbox_results.append((bbox_results, ))\n                proposal_list = bbox_results['detach_proposal_list']\n                object_feats = bbox_results['object_feats']\n\n                if self.with_mask:\n                    rois = bbox2roi(proposal_list)\n                    mask_results = self._mask_forward(\n                        stage, x, rois, bbox_results['attn_feats'])\n                    all_stage_bbox_results[-1] += (mask_results, )\n        return 
all_stage_bbox_results\n"
  },
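In simple_test above, SparseRoIHead flattens each image's (num_proposals, num_classes) score matrix, keeps the top max_per_img entries, and recovers the class id with a modulo and the source proposal with an integer division. A self-contained sketch of that decoding with random scores (all shapes are illustrative):

import torch

num_proposals, num_classes, max_per_img = 100, 80, 10
cls_score = torch.rand(num_proposals, num_classes)

scores, topk_indices = cls_score.flatten(0, 1).topk(max_per_img, sorted=False)
labels = topk_indices % num_classes         # class id of each kept detection
proposal_ids = topk_indices // num_classes  # index of the proposal it came from

# each kept score is exactly cls_score[proposal_id, label]
assert torch.allclose(scores, cls_score[proposal_ids, labels])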
  {
    "path": "mmdet/models/roi_heads/standard_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler\nfrom ..builder import HEADS, build_head, build_roi_extractor\nfrom .base_roi_head import BaseRoIHead\nfrom .test_mixins import BBoxTestMixin, MaskTestMixin\n\n\n@HEADS.register_module()\nclass StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n    \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n    def init_assigner_sampler(self):\n        \"\"\"Initialize assigner and sampler.\"\"\"\n        self.bbox_assigner = None\n        self.bbox_sampler = None\n        if self.train_cfg:\n            self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n            self.bbox_sampler = build_sampler(\n                self.train_cfg.sampler, context=self)\n\n    def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n        \"\"\"Initialize ``bbox_head``\"\"\"\n        self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)\n        self.bbox_head = build_head(bbox_head)\n\n    def init_mask_head(self, mask_roi_extractor, mask_head):\n        \"\"\"Initialize ``mask_head``\"\"\"\n        if mask_roi_extractor is not None:\n            self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n            self.share_roi_extractor = False\n        else:\n            self.share_roi_extractor = True\n            self.mask_roi_extractor = self.bbox_roi_extractor\n        self.mask_head = build_head(mask_head)\n\n    def forward_dummy(self, x, proposals):\n        \"\"\"Dummy forward function.\"\"\"\n        # bbox head\n        outs = ()\n        rois = bbox2roi([proposals])\n        if self.with_bbox:\n            bbox_results = self._bbox_forward(x, rois)\n            outs = outs + (bbox_results['cls_score'],\n                           bbox_results['bbox_pred'])\n        # mask head\n        if self.with_mask:\n            mask_rois = rois[:100]\n            mask_results = self._mask_forward(x, mask_rois)\n            outs = outs + (mask_results['mask_pred'], )\n        return outs\n\n    def forward_train(self,\n                      x,\n                      img_metas,\n                      proposal_list,\n                      gt_bboxes,\n                      gt_labels,\n                      gt_bboxes_ignore=None,\n                      gt_masks=None,\n                      **kwargs):\n        \"\"\"\n        Args:\n            x (list[Tensor]): list of multi-level img features.\n            img_metas (list[dict]): list of image info dict where each dict\n                has: 'img_shape', 'scale_factor', 'flip', and may also contain\n                'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n                For details on the values of these keys see\n                `mmdet/datasets/pipelines/formatting.py:Collect`.\n            proposals (list[Tensors]): list of region proposals.\n            gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n                shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n            gt_labels (list[Tensor]): class indices corresponding to each box\n            gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n                boxes can be ignored when computing the loss.\n            gt_masks (None | Tensor) : true segmentation masks for each box\n                used if the architecture supports a segmentation task.\n\n        Returns:\n            dict[str, Tensor]: a dictionary of 
loss components\n        \"\"\"\n        # assign gts and sample proposals\n        if self.with_bbox or self.with_mask:\n            num_imgs = len(img_metas)\n            if gt_bboxes_ignore is None:\n                gt_bboxes_ignore = [None for _ in range(num_imgs)]\n            sampling_results = []\n            for i in range(num_imgs):\n                assign_result = self.bbox_assigner.assign(\n                    proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n                    gt_labels[i])\n                sampling_result = self.bbox_sampler.sample(\n                    assign_result,\n                    proposal_list[i],\n                    gt_bboxes[i],\n                    gt_labels[i],\n                    feats=[lvl_feat[i][None] for lvl_feat in x])\n                sampling_results.append(sampling_result)\n\n        losses = dict()\n        # bbox head forward and loss\n        if self.with_bbox:\n            bbox_results = self._bbox_forward_train(x, sampling_results,\n                                                    gt_bboxes, gt_labels,\n                                                    img_metas)\n            losses.update(bbox_results['loss_bbox'])\n\n        # mask head forward and loss\n        if self.with_mask:\n            mask_results = self._mask_forward_train(x, sampling_results,\n                                                    bbox_results['bbox_feats'],\n                                                    gt_masks, img_metas)\n            losses.update(mask_results['loss_mask'])\n\n        return losses\n\n    def _bbox_forward(self, x, rois):\n        \"\"\"Box head forward function used in both training and testing.\"\"\"\n        # TODO: a more flexible way to decide which feature maps to use\n        bbox_feats = self.bbox_roi_extractor(\n            x[:self.bbox_roi_extractor.num_inputs], rois)\n        if self.with_shared_head:\n            bbox_feats = self.shared_head(bbox_feats)\n        cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n        bbox_results = dict(\n            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n        return bbox_results\n\n    def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,\n                            img_metas):\n        \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n        rois = bbox2roi([res.bboxes for res in sampling_results])\n        bbox_results = self._bbox_forward(x, rois)\n\n        bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n                                                  gt_labels, self.train_cfg)\n        loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n                                        bbox_results['bbox_pred'], rois,\n                                        *bbox_targets)\n\n        bbox_results.update(loss_bbox=loss_bbox)\n        return bbox_results\n\n    def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n                            img_metas):\n        \"\"\"Run forward function and calculate loss for mask head in\n        training.\"\"\"\n        if not self.share_roi_extractor:\n            pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n            mask_results = self._mask_forward(x, pos_rois)\n        else:\n            pos_inds = []\n            device = bbox_feats.device\n            for res in sampling_results:\n                pos_inds.append(\n                    torch.ones(\n                        
res.pos_bboxes.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n                pos_inds.append(\n                    torch.zeros(\n                        res.neg_bboxes.shape[0],\n                        device=device,\n                        dtype=torch.uint8))\n            pos_inds = torch.cat(pos_inds)\n\n            mask_results = self._mask_forward(\n                x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n        mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n                                                  self.train_cfg)\n        pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n        loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n                                        mask_targets, pos_labels)\n\n        mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n        return mask_results\n\n    def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):\n        \"\"\"Mask head forward function used in both training and testing.\"\"\"\n        assert ((rois is not None) ^\n                (pos_inds is not None and bbox_feats is not None))\n        if rois is not None:\n            mask_feats = self.mask_roi_extractor(\n                x[:self.mask_roi_extractor.num_inputs], rois)\n            if self.with_shared_head:\n                mask_feats = self.shared_head(mask_feats)\n        else:\n            assert bbox_feats is not None\n            mask_feats = bbox_feats[pos_inds]\n\n        mask_pred = self.mask_head(mask_feats)\n        mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n        return mask_results\n\n    async def async_simple_test(self,\n                                x,\n                                proposal_list,\n                                img_metas,\n                                proposals=None,\n                                rescale=False):\n        \"\"\"Async test without augmentation.\"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n\n        det_bboxes, det_labels = await self.async_test_bboxes(\n            x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n        bbox_results = bbox2result(det_bboxes, det_labels,\n                                   self.bbox_head.num_classes)\n        if not self.with_mask:\n            return bbox_results\n        else:\n            segm_results = await self.async_test_mask(\n                x,\n                img_metas,\n                det_bboxes,\n                det_labels,\n                rescale=rescale,\n                mask_test_cfg=self.test_cfg.get('mask'))\n            return bbox_results, segm_results\n\n    def simple_test(self,\n                    x,\n                    proposal_list,\n                    img_metas,\n                    proposals=None,\n                    rescale=False):\n        \"\"\"Test without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Features from upstream network. Each\n                has shape (batch_size, c, h, w).\n            proposal_list (list(Tensor)): Proposals from rpn head.\n                Each has shape (num_proposals, 5), last dimension\n                5 represent (x1, y1, x2, y2, score).\n            img_metas (list[dict]): Meta information of images.\n            rescale (bool): Whether to rescale the results to\n                the original image. 
Default: True.\n\n        Returns:\n            list[list[np.ndarray]] or list[tuple]: When no mask branch,\n            it is bbox results of each image and classes with type\n            `list[list[np.ndarray]]`. The outer list\n            corresponds to each image. The inner list\n            corresponds to each class. When the model has mask branch,\n            it contains bbox results and mask results.\n            The outer list corresponds to each image, and first element\n            of tuple is bbox results, second element is mask results.\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n\n        det_bboxes, det_labels = self.simple_test_bboxes(\n            x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n\n        bbox_results = [\n            bbox2result(det_bboxes[i], det_labels[i],\n                        self.bbox_head.num_classes)\n            for i in range(len(det_bboxes))\n        ]\n\n        if not self.with_mask:\n            return bbox_results\n        else:\n            segm_results = self.simple_test_mask(\n                x, img_metas, det_bboxes, det_labels, rescale=rescale)\n            return list(zip(bbox_results, segm_results))\n\n    def aug_test(self, x, proposal_list, img_metas, rescale=False):\n        \"\"\"Test with augmentations.\n\n        If rescale is False, then returned bboxes and masks will fit the scale\n        of imgs[0].\n        \"\"\"\n        det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,\n                                                      proposal_list,\n                                                      self.test_cfg)\n        if rescale:\n            _det_bboxes = det_bboxes\n        else:\n            _det_bboxes = det_bboxes.clone()\n            _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n                img_metas[0][0]['scale_factor'])\n        bbox_results = bbox2result(_det_bboxes, det_labels,\n                                   self.bbox_head.num_classes)\n\n        # det_bboxes always keep the original scale\n        if self.with_mask:\n            segm_results = self.aug_test_mask(x, img_metas, det_bboxes,\n                                              det_labels)\n            return [(bbox_results, segm_results)]\n        else:\n            return [bbox_results]\n\n    def onnx_export(self, x, proposals, img_metas, rescale=False):\n        \"\"\"Test without augmentation.\"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        det_bboxes, det_labels = self.bbox_onnx_export(\n            x, img_metas, proposals, self.test_cfg, rescale=rescale)\n\n        if not self.with_mask:\n            return det_bboxes, det_labels\n        else:\n            segm_results = self.mask_onnx_export(\n                x, img_metas, det_bboxes, det_labels, rescale=rescale)\n            return det_bboxes, det_labels, segm_results\n\n    def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n        \"\"\"Export mask branch to onnx which supports batch inference.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            img_metas (list[dict]): Image meta info.\n            det_bboxes (Tensor): Bboxes and corresponding scores.\n                has shape [N, num_bboxes, 5].\n            det_labels (Tensor): class labels of\n                shape [N, num_bboxes].\n\n        Returns:\n            Tensor: The segmentation results of shape [N, num_bboxes,\n                image_height, image_width].\n   
     \"\"\"\n        # image shapes of images in the batch\n\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            raise RuntimeError('[ONNX Error] Can not record MaskHead '\n                               'as it has not been executed this time')\n        batch_size = det_bboxes.size(0)\n        # if det_bboxes is rescaled to the original image size, we need to\n        # rescale it back to the testing scale to obtain RoIs.\n        det_bboxes = det_bboxes[..., :4]\n        batch_index = torch.arange(\n            det_bboxes.size(0), device=det_bboxes.device).float().view(\n                -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n        mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n        mask_rois = mask_rois.view(-1, 5)\n        mask_results = self._mask_forward(x, mask_rois)\n        mask_pred = mask_results['mask_pred']\n        max_shape = img_metas[0]['img_shape_for_onnx']\n        num_det = det_bboxes.shape[1]\n        det_bboxes = det_bboxes.reshape(-1, 4)\n        det_labels = det_labels.reshape(-1)\n        segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n                                                  det_labels, self.test_cfg,\n                                                  max_shape)\n        segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n                                            max_shape[1])\n        return segm_results\n\n    def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,\n                         **kwargs):\n        \"\"\"Export bbox branch to onnx which supports batch inference.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            img_metas (list[dict]): Image meta info.\n            proposals (Tensor): Region proposals with\n                batch dimension, has shape [N, num_bboxes, 5].\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n\n        Returns:\n            tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n                and class labels of shape [N, num_bboxes].\n        \"\"\"\n        # get origin input shape to support onnx dynamic input shape\n        assert len(\n            img_metas\n        ) == 1, 'Only support one input image while in exporting to ONNX'\n        img_shapes = img_metas[0]['img_shape_for_onnx']\n\n        rois = proposals\n\n        batch_index = torch.arange(\n            rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n                rois.size(0), rois.size(1), 1)\n\n        rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n        batch_size = rois.shape[0]\n        num_proposals_per_img = rois.shape[1]\n\n        # Eliminate the batch dimension\n        rois = rois.view(-1, 5)\n        bbox_results = self._bbox_forward(x, rois)\n        cls_score = bbox_results['cls_score']\n        bbox_pred = bbox_results['bbox_pred']\n\n        # Recover the batch dimension\n        rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n        cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n                                      cls_score.size(-1))\n\n        bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n                                      bbox_pred.size(-1))\n        det_bboxes, det_labels = self.bbox_head.onnx_export(\n            rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)\n\n        return det_bboxes, det_labels\n"
  },
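When StandardRoIHead shares the bbox RoI extractor with the mask branch, _mask_forward_train above selects the positive RoI features by concatenating a per-image ones/zeros indicator in the sampler's [positives, negatives] order and indexing bbox_feats with it. A minimal standalone sketch of that indexing (counts and feature sizes are made up; a bool mask is used here where the original builds a uint8 tensor):

import torch

pos_neg_counts = [(3, 5), (2, 6)]  # (num_pos, num_neg) per image
bbox_feats = torch.randn(sum(p + n for p, n in pos_neg_counts), 256, 7, 7)

pos_inds = torch.cat([
    torch.cat([torch.ones(p, dtype=torch.bool), torch.zeros(n, dtype=torch.bool)])
    for p, n in pos_neg_counts
])
mask_feats = bbox_feats[pos_inds]  # only the 5 positive RoIs reach the mask head
print(mask_feats.shape)            # torch.Size([5, 256, 7, 7])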
  {
    "path": "mmdet/models/roi_heads/test_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\nimport warnings\n\nimport numpy as np\nimport torch\n\nfrom mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,\n                        merge_aug_masks, multiclass_nms)\n\nif sys.version_info >= (3, 7):\n    from mmdet.utils.contextmanagers import completed\n\n\nclass BBoxTestMixin:\n\n    if sys.version_info >= (3, 7):\n\n        async def async_test_bboxes(self,\n                                    x,\n                                    img_metas,\n                                    proposals,\n                                    rcnn_test_cfg,\n                                    rescale=False,\n                                    **kwargs):\n            \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n            rois = bbox2roi(proposals)\n            roi_feats = self.bbox_roi_extractor(\n                x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n            if self.with_shared_head:\n                roi_feats = self.shared_head(roi_feats)\n            sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n            async with completed(\n                    __name__, 'bbox_head_forward',\n                    sleep_interval=sleep_interval):\n                cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n            img_shape = img_metas[0]['img_shape']\n            scale_factor = img_metas[0]['scale_factor']\n            det_bboxes, det_labels = self.bbox_head.get_bboxes(\n                rois,\n                cls_score,\n                bbox_pred,\n                img_shape,\n                scale_factor,\n                rescale=rescale,\n                cfg=rcnn_test_cfg)\n            return det_bboxes, det_labels\n\n    def simple_test_bboxes(self,\n                           x,\n                           img_metas,\n                           proposals,\n                           rcnn_test_cfg,\n                           rescale=False):\n        \"\"\"Test only det bboxes without augmentation.\n\n        Args:\n            x (tuple[Tensor]): Feature maps of all scale level.\n            img_metas (list[dict]): Image meta info.\n            proposals (List[Tensor]): Region proposals.\n            rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n            rescale (bool): If True, return boxes in original image space.\n                Default: False.\n\n        Returns:\n            tuple[list[Tensor], list[Tensor]]: The first list contains\n                the boxes of the corresponding image in a batch, each\n                tensor has the shape (num_boxes, 5) and last dimension\n                5 represent (tl_x, tl_y, br_x, br_y, score). 
Each Tensor\n                in the second list is the labels with shape (num_boxes, ).\n                The length of both lists should be equal to batch_size.\n        \"\"\"\n\n        rois = bbox2roi(proposals)\n\n        if rois.shape[0] == 0:\n            batch_size = len(proposals)\n            det_bbox = rois.new_zeros(0, 5)\n            det_label = rois.new_zeros((0, ), dtype=torch.long)\n            if rcnn_test_cfg is None:\n                det_bbox = det_bbox[:, :4]\n                det_label = rois.new_zeros(\n                    (0, self.bbox_head.fc_cls.out_features))\n            # There is no proposal in the whole batch\n            return [det_bbox] * batch_size, [det_label] * batch_size\n\n        bbox_results = self._bbox_forward(x, rois)\n        img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        # split batch bbox prediction back to each image\n        cls_score = bbox_results['cls_score']\n        bbox_pred = bbox_results['bbox_pred']\n        num_proposals_per_img = tuple(len(p) for p in proposals)\n        rois = rois.split(num_proposals_per_img, 0)\n        cls_score = cls_score.split(num_proposals_per_img, 0)\n\n        # some detector with_reg is False, bbox_pred will be None\n        if bbox_pred is not None:\n            # TODO move this to a sabl_roi_head\n            # the bbox prediction of some detectors like SABL is not Tensor\n            if isinstance(bbox_pred, torch.Tensor):\n                bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n            else:\n                bbox_pred = self.bbox_head.bbox_pred_split(\n                    bbox_pred, num_proposals_per_img)\n        else:\n            bbox_pred = (None, ) * len(proposals)\n\n        # apply bbox post-processing to each image individually\n        det_bboxes = []\n        det_labels = []\n        for i in range(len(proposals)):\n            if rois[i].shape[0] == 0:\n                # There is no proposal in the single image\n                det_bbox = rois[i].new_zeros(0, 5)\n                det_label = rois[i].new_zeros((0, ), dtype=torch.long)\n                if rcnn_test_cfg is None:\n                    det_bbox = det_bbox[:, :4]\n                    det_label = rois[i].new_zeros(\n                        (0, self.bbox_head.fc_cls.out_features))\n\n            else:\n                det_bbox, det_label = self.bbox_head.get_bboxes(\n                    rois[i],\n                    cls_score[i],\n                    bbox_pred[i],\n                    img_shapes[i],\n                    scale_factors[i],\n                    rescale=rescale,\n                    cfg=rcnn_test_cfg)\n            det_bboxes.append(det_bbox)\n            det_labels.append(det_label)\n        return det_bboxes, det_labels\n\n    def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n        \"\"\"Test det bboxes with test time augmentation.\"\"\"\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta in zip(feats, img_metas):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n            scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n            flip_direction = img_meta[0]['flip_direction']\n            # TODO more flexible\n            proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n                                     scale_factor, flip, flip_direction)\n            rois 
= bbox2roi([proposals])\n            bbox_results = self._bbox_forward(x, rois)\n            bboxes, scores = self.bbox_head.get_bboxes(\n                rois,\n                bbox_results['cls_score'],\n                bbox_results['bbox_pred'],\n                img_shape,\n                scale_factor,\n                rescale=False,\n                cfg=None)\n            aug_bboxes.append(bboxes)\n            aug_scores.append(scores)\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        if merged_bboxes.shape[0] == 0:\n            # There is no proposal in the single image\n            det_bboxes = merged_bboxes.new_zeros(0, 5)\n            det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long)\n        else:\n            det_bboxes, det_labels = multiclass_nms(merged_bboxes,\n                                                    merged_scores,\n                                                    rcnn_test_cfg.score_thr,\n                                                    rcnn_test_cfg.nms,\n                                                    rcnn_test_cfg.max_per_img)\n        return det_bboxes, det_labels\n\n\nclass MaskTestMixin:\n\n    if sys.version_info >= (3, 7):\n\n        async def async_test_mask(self,\n                                  x,\n                                  img_metas,\n                                  det_bboxes,\n                                  det_labels,\n                                  rescale=False,\n                                  mask_test_cfg=None):\n            \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n            # image shape of the first image in the batch (only one)\n            ori_shape = img_metas[0]['ori_shape']\n            scale_factor = img_metas[0]['scale_factor']\n            if det_bboxes.shape[0] == 0:\n                segm_result = [[] for _ in range(self.mask_head.num_classes)]\n            else:\n                if rescale and not isinstance(scale_factor,\n                                              (float, torch.Tensor)):\n                    scale_factor = det_bboxes.new_tensor(scale_factor)\n                _bboxes = (\n                    det_bboxes[:, :4] *\n                    scale_factor if rescale else det_bboxes)\n                mask_rois = bbox2roi([_bboxes])\n                mask_feats = self.mask_roi_extractor(\n                    x[:len(self.mask_roi_extractor.featmap_strides)],\n                    mask_rois)\n\n                if self.with_shared_head:\n                    mask_feats = self.shared_head(mask_feats)\n                if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\n                    sleep_interval = mask_test_cfg['async_sleep_interval']\n                else:\n                    sleep_interval = 0.035\n                async with completed(\n                        __name__,\n                        'mask_head_forward',\n                        sleep_interval=sleep_interval):\n                    mask_pred = self.mask_head(mask_feats)\n                segm_result = self.mask_head.get_seg_masks(\n                    mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n                    scale_factor, rescale)\n            return segm_result\n\n    def simple_test_mask(self,\n                         x,\n                         img_metas,\n                         det_bboxes,\n         
                det_labels,\n                         rescale=False):\n        \"\"\"Simple test for mask head without augmentation.\"\"\"\n        # image shapes of images in the batch\n        ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n        scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n        if isinstance(scale_factors[0], float):\n            warnings.warn(\n                'Scale factor in img_metas should be a '\n                'ndarray with shape (4,) '\n                'arrange as (factor_w, factor_h, factor_w, factor_h), '\n                'The scale_factor with float type has been deprecated. ')\n            scale_factors = np.array([scale_factors] * 4, dtype=np.float32)\n\n        num_imgs = len(det_bboxes)\n        if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n            segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n                            for _ in range(num_imgs)]\n        else:\n            # if det_bboxes is rescaled to the original image size, we need to\n            # rescale it back to the testing scale to obtain RoIs.\n            if rescale:\n                scale_factors = [\n                    torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n                    for scale_factor in scale_factors\n                ]\n            _bboxes = [\n                det_bboxes[i][:, :4] *\n                scale_factors[i] if rescale else det_bboxes[i][:, :4]\n                for i in range(len(det_bboxes))\n            ]\n            mask_rois = bbox2roi(_bboxes)\n            mask_results = self._mask_forward(x, mask_rois)\n            mask_pred = mask_results['mask_pred']\n            # split batch mask prediction back to each image\n            num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]\n            mask_preds = mask_pred.split(num_mask_roi_per_img, 0)\n\n            # apply mask post-processing to each image individually\n            segm_results = []\n            for i in range(num_imgs):\n                if det_bboxes[i].shape[0] == 0:\n                    segm_results.append(\n                        [[] for _ in range(self.mask_head.num_classes)])\n                else:\n                    segm_result = self.mask_head.get_seg_masks(\n                        mask_preds[i], _bboxes[i], det_labels[i],\n                        self.test_cfg, ori_shapes[i], scale_factors[i],\n                        rescale)\n                    segm_results.append(segm_result)\n        return segm_results\n\n    def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n        \"\"\"Test for mask head with test time augmentation.\"\"\"\n        if det_bboxes.shape[0] == 0:\n            segm_result = [[] for _ in range(self.mask_head.num_classes)]\n        else:\n            aug_masks = []\n            for x, img_meta in zip(feats, img_metas):\n                img_shape = img_meta[0]['img_shape']\n                scale_factor = img_meta[0]['scale_factor']\n                flip = img_meta[0]['flip']\n                flip_direction = img_meta[0]['flip_direction']\n                _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n                                       scale_factor, flip, flip_direction)\n                mask_rois = bbox2roi([_bboxes])\n                mask_results = self._mask_forward(x, mask_rois)\n                # convert to numpy array to save memory\n                aug_masks.append(\n                    
mask_results['mask_pred'].sigmoid().cpu().numpy())\n            merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n            ori_shape = img_metas[0][0]['ori_shape']\n            scale_factor = det_bboxes.new_ones(4)\n            segm_result = self.mask_head.get_seg_masks(\n                merged_masks,\n                det_bboxes,\n                det_labels,\n                self.test_cfg,\n                ori_shape,\n                scale_factor=scale_factor,\n                rescale=False)\n        return segm_result\n"
  },
  {
    "path": "mmdet/models/roi_heads/trident_roi_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import batched_nms\n\nfrom mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,\n                        multiclass_nms)\nfrom mmdet.models.roi_heads.standard_roi_head import StandardRoIHead\nfrom ..builder import HEADS\n\n\n@HEADS.register_module()\nclass TridentRoIHead(StandardRoIHead):\n    \"\"\"Trident roi head.\n\n    Args:\n        num_branch (int): Number of branches in TridentNet.\n        test_branch_idx (int): In inference, all 3 branches will be used\n            if `test_branch_idx==-1`, otherwise only branch with index\n            `test_branch_idx` will be used.\n    \"\"\"\n\n    def __init__(self, num_branch, test_branch_idx, **kwargs):\n        self.num_branch = num_branch\n        self.test_branch_idx = test_branch_idx\n        super(TridentRoIHead, self).__init__(**kwargs)\n\n    def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):\n        \"\"\"Merge bbox predictions of each branch.\"\"\"\n        if trident_det_bboxes.numel() == 0:\n            det_bboxes = trident_det_bboxes.new_zeros((0, 5))\n            det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)\n        else:\n            nms_bboxes = trident_det_bboxes[:, :4]\n            nms_scores = trident_det_bboxes[:, 4].contiguous()\n            nms_inds = trident_det_labels\n            nms_cfg = self.test_cfg['nms']\n            det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,\n                                           nms_cfg)\n            det_labels = trident_det_labels[keep]\n            if self.test_cfg['max_per_img'] > 0:\n                det_labels = det_labels[:self.test_cfg['max_per_img']]\n                det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]\n\n        return det_bboxes, det_labels\n\n    def simple_test(self,\n                    x,\n                    proposal_list,\n                    img_metas,\n                    proposals=None,\n                    rescale=False):\n        \"\"\"Test without augmentation as follows:\n\n        1. Compute prediction bbox and label per branch.\n        2. 
Merge predictions of each branch according to scores of\n           bboxes, i.e., bboxes with higher score are kept to give\n           top-k prediction.\n        \"\"\"\n        assert self.with_bbox, 'Bbox head must be implemented.'\n        det_bboxes_list, det_labels_list = self.simple_test_bboxes(\n            x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n        num_branch = self.num_branch if self.test_branch_idx == -1 else 1\n        for _ in range(len(det_bboxes_list)):\n            if det_bboxes_list[_].shape[0] == 0:\n                det_bboxes_list[_] = det_bboxes_list[_].new_empty((0, 5))\n        det_bboxes, det_labels = [], []\n        for i in range(len(img_metas) // num_branch):\n            det_result = self.merge_trident_bboxes(\n                torch.cat(det_bboxes_list[i * num_branch:(i + 1) *\n                                          num_branch]),\n                torch.cat(det_labels_list[i * num_branch:(i + 1) *\n                                          num_branch]))\n            det_bboxes.append(det_result[0])\n            det_labels.append(det_result[1])\n\n        bbox_results = [\n            bbox2result(det_bboxes[i], det_labels[i],\n                        self.bbox_head.num_classes)\n            for i in range(len(det_bboxes))\n        ]\n        return bbox_results\n\n    def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n        \"\"\"Test det bboxes with test time augmentation.\"\"\"\n        aug_bboxes = []\n        aug_scores = []\n        for x, img_meta in zip(feats, img_metas):\n            # only one image in the batch\n            img_shape = img_meta[0]['img_shape']\n            scale_factor = img_meta[0]['scale_factor']\n            flip = img_meta[0]['flip']\n            flip_direction = img_meta[0]['flip_direction']\n\n            trident_bboxes, trident_scores = [], []\n            for branch_idx in range(len(proposal_list)):\n                proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n                                         scale_factor, flip, flip_direction)\n                rois = bbox2roi([proposals])\n                bbox_results = self._bbox_forward(x, rois)\n                bboxes, scores = self.bbox_head.get_bboxes(\n                    rois,\n                    bbox_results['cls_score'],\n                    bbox_results['bbox_pred'],\n                    img_shape,\n                    scale_factor,\n                    rescale=False,\n                    cfg=None)\n                trident_bboxes.append(bboxes)\n                trident_scores.append(scores)\n\n            aug_bboxes.append(torch.cat(trident_bboxes, 0))\n            aug_scores.append(torch.cat(trident_scores, 0))\n        # after merging, bboxes will be rescaled to the original image size\n        merged_bboxes, merged_scores = merge_aug_bboxes(\n            aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n        det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n                                                rcnn_test_cfg.score_thr,\n                                                rcnn_test_cfg.nms,\n                                                rcnn_test_cfg.max_per_img)\n        return det_bboxes, det_labels\n"
  },
  {
    "path": "mmdet/models/seg_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .panoptic_fpn_head import PanopticFPNHead  # noqa: F401,F403\nfrom .panoptic_fusion_heads import *  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/models/seg_heads/base_semantic_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch.nn.functional as F\nfrom mmcv.runner import BaseModule, force_fp32\n\nfrom ..builder import build_loss\nfrom ..utils import interpolate_as\n\n\nclass BaseSemanticHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base module of Semantic Head.\n\n    Args:\n        num_classes (int): the number of classes.\n        init_cfg (dict): the initialization config.\n        loss_seg (dict): the loss of the semantic head.\n    \"\"\"\n\n    def __init__(self,\n                 num_classes,\n                 init_cfg=None,\n                 loss_seg=dict(\n                     type='CrossEntropyLoss',\n                     ignore_index=255,\n                     loss_weight=1.0)):\n        super(BaseSemanticHead, self).__init__(init_cfg)\n        self.loss_seg = build_loss(loss_seg)\n        self.num_classes = num_classes\n\n    @force_fp32(apply_to=('seg_preds', ))\n    def loss(self, seg_preds, gt_semantic_seg):\n        \"\"\"Get the loss of semantic head.\n\n        Args:\n            seg_preds (Tensor): The input logits with the shape (N, C, H, W).\n            gt_semantic_seg: The ground truth of semantic segmentation with\n                the shape (N, H, W).\n            label_bias: The starting number of the semantic label.\n                Default: 1.\n\n        Returns:\n            dict: the loss of semantic head.\n        \"\"\"\n        if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:\n            seg_preds = interpolate_as(seg_preds, gt_semantic_seg)\n        seg_preds = seg_preds.permute((0, 2, 3, 1))\n\n        loss_seg = self.loss_seg(\n            seg_preds.reshape(-1, self.num_classes),  # => [NxHxW, C]\n            gt_semantic_seg.reshape(-1).long())\n        return dict(loss_seg=loss_seg)\n\n    @abstractmethod\n    def forward(self, x):\n        \"\"\"Placeholder of forward function.\n\n        Returns:\n            dict[str, Tensor]: A dictionary, including features\n                and predicted scores. Required keys: 'seg_preds'\n                and 'feats'.\n        \"\"\"\n        pass\n\n    def forward_train(self, x, gt_semantic_seg):\n        output = self.forward(x)\n        seg_preds = output['seg_preds']\n        return self.loss(seg_preds, gt_semantic_seg)\n\n    def simple_test(self, x, img_metas, rescale=False):\n        output = self.forward(x)\n        seg_preds = output['seg_preds']\n        seg_preds = F.interpolate(\n            seg_preds,\n            size=img_metas[0]['pad_shape'][:2],\n            mode='bilinear',\n            align_corners=False)\n\n        if rescale:\n            h, w, _ = img_metas[0]['img_shape']\n            seg_preds = seg_preds[:, :, :h, :w]\n\n            h, w, _ = img_metas[0]['ori_shape']\n            seg_preds = F.interpolate(\n                seg_preds, size=(h, w), mode='bilinear', align_corners=False)\n        return seg_preds\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fpn_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import ModuleList\n\nfrom ..builder import HEADS\nfrom ..utils import ConvUpsample\nfrom .base_semantic_head import BaseSemanticHead\n\n\n@HEADS.register_module()\nclass PanopticFPNHead(BaseSemanticHead):\n    \"\"\"PanopticFPNHead used in Panoptic FPN.\n\n    In this head, the number of output channels is ``num_stuff_classes\n    + 1``, including all stuff classes and one thing class. The stuff\n    classes will be reset from ``0`` to ``num_stuff_classes - 1``, the\n    thing classes will be merged to ``num_stuff_classes``-th channel.\n\n    Arg:\n        num_things_classes (int): Number of thing classes. Default: 80.\n        num_stuff_classes (int): Number of stuff classes. Default: 53.\n        num_classes (int): Number of classes, including all stuff\n            classes and one thing class. This argument is deprecated,\n            please use ``num_things_classes`` and ``num_stuff_classes``.\n            The module will automatically infer the num_classes by\n            ``num_stuff_classes + 1``.\n        in_channels (int): Number of channels in the input feature\n            map.\n        inner_channels (int): Number of channels in inner features.\n        start_level (int): The start level of the input features\n            used in PanopticFPN.\n        end_level (int): The end level of the used features, the\n            ``end_level``-th layer will not be used.\n        fg_range (tuple): Range of the foreground classes. It starts\n            from ``0`` to ``num_things_classes-1``. Deprecated, please use\n             ``num_things_classes`` directly.\n        bg_range (tuple): Range of the background classes. It starts\n            from ``num_things_classes`` to ``num_things_classes +\n            num_stuff_classes - 1``. Deprecated, please use\n            ``num_stuff_classes`` and ``num_things_classes`` directly.\n        conv_cfg (dict): Dictionary to construct and config\n            conv layer. 
Default: None.\n        norm_cfg (dict): Dictionary to construct and config norm layer.\n            Use ``GN`` by default.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n        loss_seg (dict): the loss of the semantic head.\n    \"\"\"\n\n    def __init__(self,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 num_classes=None,\n                 in_channels=256,\n                 inner_channels=128,\n                 start_level=0,\n                 end_level=4,\n                 fg_range=None,\n                 bg_range=None,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\n                 init_cfg=None,\n                 loss_seg=dict(\n                     type='CrossEntropyLoss', ignore_index=-1,\n                     loss_weight=1.0)):\n        if num_classes is not None:\n            warnings.warn(\n                '`num_classes` is deprecated now, please set '\n                '`num_stuff_classes` directly, the `num_classes` will be '\n                'set to `num_stuff_classes + 1`')\n            # num_classes = num_stuff_classes + 1 for PanopticFPN.\n            assert num_classes == num_stuff_classes + 1\n        super(PanopticFPNHead, self).__init__(num_stuff_classes + 1, init_cfg,\n                                              loss_seg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        if fg_range is not None and bg_range is not None:\n            self.fg_range = fg_range\n            self.bg_range = bg_range\n            self.num_things_classes = fg_range[1] - fg_range[0] + 1\n            self.num_stuff_classes = bg_range[1] - bg_range[0] + 1\n            warnings.warn(\n                '`fg_range` and `bg_range` are deprecated now, '\n                f'please use `num_things_classes`={self.num_things_classes} '\n                f'and `num_stuff_classes`={self.num_stuff_classes} instead.')\n\n        # Used feature layers are [start_level, end_level)\n        self.start_level = start_level\n        self.end_level = end_level\n        self.num_stages = end_level - start_level\n        self.inner_channels = inner_channels\n\n        self.conv_upsample_layers = ModuleList()\n        for i in range(start_level, end_level):\n            self.conv_upsample_layers.append(\n                ConvUpsample(\n                    in_channels,\n                    inner_channels,\n                    num_layers=i if i > 0 else 1,\n                    num_upsample=i if i > 0 else 0,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                ))\n        self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)\n\n    def _set_things_to_void(self, gt_semantic_seg):\n        \"\"\"Merge thing classes to one class.\n\n        In PanopticFPN, the background labels will be reset from `0` to\n        `self.num_stuff_classes-1`, the foreground labels will be merged to\n        `self.num_stuff_classes`-th channel.\n        \"\"\"\n        gt_semantic_seg = gt_semantic_seg.int()\n        fg_mask = gt_semantic_seg < self.num_things_classes\n        bg_mask = (gt_semantic_seg >= self.num_things_classes) * (\n            gt_semantic_seg < self.num_things_classes + self.num_stuff_classes)\n\n        new_gt_seg = torch.clone(gt_semantic_seg)\n        new_gt_seg = torch.where(bg_mask,\n                                 gt_semantic_seg - 
self.num_things_classes,\n                                 new_gt_seg)\n        new_gt_seg = torch.where(fg_mask,\n                                 fg_mask.int() * self.num_stuff_classes,\n                                 new_gt_seg)\n        return new_gt_seg\n\n    def loss(self, seg_preds, gt_semantic_seg):\n        \"\"\"The loss of PanopticFPN head.\n\n        Things classes will be merged to one class in PanopticFPN.\n        \"\"\"\n        gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)\n        return super().loss(seg_preds, gt_semantic_seg)\n\n    def init_weights(self):\n        super().init_weights()\n        nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)\n        self.conv_logits.bias.data.zero_()\n\n    def forward(self, x):\n        # the number of subnets must be not more than\n        # the length of features.\n        assert self.num_stages <= len(x)\n\n        feats = []\n        for i, layer in enumerate(self.conv_upsample_layers):\n            f = layer(x[self.start_level + i])\n            feats.append(f)\n\n        feats = torch.sum(torch.stack(feats, dim=0), dim=0)\n        seg_preds = self.conv_logits(feats)\n        out = dict(seg_preds=seg_preds, feats=feats)\n        return out\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_panoptic_fusion_head import \\\n    BasePanopticFusionHead  # noqa: F401,F403\nfrom .heuristic_fusion_head import HeuristicFusionHead  # noqa: F401,F403\nfrom .maskformer_fusion_head import MaskFormerFusionHead  # noqa: F401,F403\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nfrom mmcv.runner import BaseModule\n\nfrom ...builder import build_loss\n\n\nclass BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):\n    \"\"\"Base class for panoptic heads.\"\"\"\n\n    def __init__(self,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 test_cfg=None,\n                 loss_panoptic=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(BasePanopticFusionHead, self).__init__(init_cfg)\n        self.num_things_classes = num_things_classes\n        self.num_stuff_classes = num_stuff_classes\n        self.num_classes = num_things_classes + num_stuff_classes\n        self.test_cfg = test_cfg\n\n        if loss_panoptic:\n            self.loss_panoptic = build_loss(loss_panoptic)\n        else:\n            self.loss_panoptic = None\n\n    @property\n    def with_loss(self):\n        \"\"\"bool: whether the panoptic head contains loss function.\"\"\"\n        return self.loss_panoptic is not None\n\n    @abstractmethod\n    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):\n        \"\"\"Forward function during training.\"\"\"\n\n    @abstractmethod\n    def simple_test(self,\n                    img_metas,\n                    det_labels,\n                    mask_preds,\n                    seg_preds,\n                    det_bboxes,\n                    cfg=None,\n                    **kwargs):\n        \"\"\"Test without augmentation.\"\"\"\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET\nfrom mmdet.models.builder import HEADS\nfrom .base_panoptic_fusion_head import BasePanopticFusionHead\n\n\n@HEADS.register_module()\nclass HeuristicFusionHead(BasePanopticFusionHead):\n    \"\"\"Fusion Head with Heuristic method.\"\"\"\n\n    def __init__(self,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 test_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(HeuristicFusionHead,\n              self).__init__(num_things_classes, num_stuff_classes, test_cfg,\n                             None, init_cfg, **kwargs)\n\n    def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):\n        \"\"\"HeuristicFusionHead has no training loss.\"\"\"\n        return dict()\n\n    def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):\n        \"\"\"Lay instance masks to a result map.\n\n        Args:\n            bboxes: The bboxes results, (K, 4).\n            labels: The labels of bboxes, (K, ).\n            masks: The instance masks, (K, H, W).\n            overlap_thr: Threshold to determine whether two masks overlap.\n                default: 0.5.\n\n        Returns:\n            Tensor: The result map, (H, W).\n        \"\"\"\n        num_insts = bboxes.shape[0]\n        id_map = torch.zeros(\n            masks.shape[-2:], device=bboxes.device, dtype=torch.long)\n        if num_insts == 0:\n            return id_map, labels\n\n        scores, bboxes = bboxes[:, -1], bboxes[:, :4]\n\n        # Sort by score to use heuristic fusion\n        order = torch.argsort(-scores)\n        bboxes = bboxes[order]\n        labels = labels[order]\n        segm_masks = masks[order]\n\n        instance_id = 1\n        left_labels = []\n        for idx in range(bboxes.shape[0]):\n            _cls = labels[idx]\n            _mask = segm_masks[idx]\n            instance_id_map = torch.ones_like(\n                _mask, dtype=torch.long) * instance_id\n            area = _mask.sum()\n            if area == 0:\n                continue\n\n            pasted = id_map > 0\n            intersect = (_mask * pasted).sum()\n            if (intersect / (area + 1e-5)) > overlap_thr:\n                continue\n\n            _part = _mask * (~pasted)\n            id_map = torch.where(_part, instance_id_map, id_map)\n            left_labels.append(_cls)\n            instance_id += 1\n\n        if len(left_labels) > 0:\n            instance_labels = torch.stack(left_labels)\n        else:\n            instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)\n        assert instance_id == (len(instance_labels) + 1)\n        return id_map, instance_labels\n\n    def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,\n                    **kwargs):\n        \"\"\"Fuse the results of instance and semantic segmentations.\n\n        Args:\n            det_bboxes: The bboxes results, (K, 4).\n            det_labels: The labels of bboxes, (K,).\n            mask_preds: The masks results, (K, H, W).\n            seg_preds: The semantic segmentation results,\n                (K, num_stuff + 1, H, W).\n\n        Returns:\n            Tensor : The panoptic segmentation result, (H, W).\n        \"\"\"\n        mask_preds = mask_preds >= self.test_cfg.mask_thr_binary\n        id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,\n                             
            self.test_cfg.mask_overlap)\n\n        seg_results = seg_preds.argmax(dim=0)\n        seg_results = seg_results + self.num_things_classes\n\n        pan_results = seg_results\n        instance_id = 1\n        for idx in range(det_labels.shape[0]):\n            _mask = id_map == (idx + 1)\n            if _mask.sum() == 0:\n                continue\n            _cls = labels[idx]\n            # simply trust detection\n            segment_id = _cls + instance_id * INSTANCE_OFFSET\n            pan_results[_mask] = segment_id\n            instance_id += 1\n\n        ids, counts = torch.unique(\n            pan_results % INSTANCE_OFFSET, return_counts=True)\n        stuff_ids = ids[ids >= self.num_things_classes]\n        stuff_counts = counts[ids >= self.num_things_classes]\n        ignore_stuff_ids = stuff_ids[\n            stuff_counts < self.test_cfg.stuff_area_limit]\n\n        assert pan_results.ndim == 2\n        pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(\n            1, 1, -1)).any(dim=2)] = self.num_classes\n\n        return pan_results\n"
  },
  {
    "path": "mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET\nfrom mmdet.core.mask import mask2bbox\nfrom mmdet.models.builder import HEADS\nfrom .base_panoptic_fusion_head import BasePanopticFusionHead\n\n\n@HEADS.register_module()\nclass MaskFormerFusionHead(BasePanopticFusionHead):\n\n    def __init__(self,\n                 num_things_classes=80,\n                 num_stuff_classes=53,\n                 test_cfg=None,\n                 loss_panoptic=None,\n                 init_cfg=None,\n                 **kwargs):\n        super().__init__(num_things_classes, num_stuff_classes, test_cfg,\n                         loss_panoptic, init_cfg, **kwargs)\n\n    def forward_train(self, **kwargs):\n        \"\"\"MaskFormerFusionHead has no training loss.\"\"\"\n        return dict()\n\n    def panoptic_postprocess(self, mask_cls, mask_pred):\n        \"\"\"Panoptic segmengation inference.\n\n        Args:\n            mask_cls (Tensor): Classfication outputs of shape\n                (num_queries, cls_out_channels) for a image.\n                Note `cls_out_channels` should includes\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for a image.\n\n        Returns:\n            Tensor: Panoptic segment result of shape \\\n                (h, w), each element in Tensor means: \\\n                ``segment_id = _cls + instance_id * INSTANCE_OFFSET``.\n        \"\"\"\n        object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8)\n        iou_thr = self.test_cfg.get('iou_thr', 0.8)\n        filter_low_score = self.test_cfg.get('filter_low_score', False)\n\n        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)\n        mask_pred = mask_pred.sigmoid()\n\n        keep = labels.ne(self.num_classes) & (scores > object_mask_thr)\n        cur_scores = scores[keep]\n        cur_classes = labels[keep]\n        cur_masks = mask_pred[keep]\n\n        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks\n\n        h, w = cur_masks.shape[-2:]\n        panoptic_seg = torch.full((h, w),\n                                  self.num_classes,\n                                  dtype=torch.int32,\n                                  device=cur_masks.device)\n        if cur_masks.shape[0] == 0:\n            # We didn't detect any mask :(\n            pass\n        else:\n            cur_mask_ids = cur_prob_masks.argmax(0)\n            instance_id = 1\n            for k in range(cur_classes.shape[0]):\n                pred_class = int(cur_classes[k].item())\n                isthing = pred_class < self.num_things_classes\n                mask = cur_mask_ids == k\n                mask_area = mask.sum().item()\n                original_area = (cur_masks[k] >= 0.5).sum().item()\n\n                if filter_low_score:\n                    mask = mask & (cur_masks[k] >= 0.5)\n\n                if mask_area > 0 and original_area > 0:\n                    if mask_area / original_area < iou_thr:\n                        continue\n\n                    if not isthing:\n                        # different stuff regions of same class will be\n                        # merged here, and stuff share the instance_id 0.\n                        panoptic_seg[mask] = pred_class\n                    else:\n                        panoptic_seg[mask] = (\n                            pred_class + instance_id * INSTANCE_OFFSET)\n               
         instance_id += 1\n\n        return panoptic_seg\n\n    def semantic_postprocess(self, mask_cls, mask_pred):\n        \"\"\"Semantic segmentation postprocess.\n\n        Args:\n            mask_cls (Tensor): Classification outputs of shape\n                (num_queries, cls_out_channels) for an image.\n                Note `cls_out_channels` should include\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for an image.\n\n        Returns:\n            Tensor: Semantic segment result of shape \\\n                (cls_out_channels, h, w).\n        \"\"\"\n        # TODO add semantic segmentation result\n        raise NotImplementedError\n\n    def instance_postprocess(self, mask_cls, mask_pred):\n        \"\"\"Instance segmentation postprocess.\n\n        Args:\n            mask_cls (Tensor): Classification outputs of shape\n                (num_queries, cls_out_channels) for an image.\n                Note `cls_out_channels` should include\n                background.\n            mask_pred (Tensor): Mask outputs of shape\n                (num_queries, h, w) for an image.\n\n        Returns:\n            tuple[Tensor]: Instance segmentation results.\n\n            - labels_per_image (Tensor): Predicted labels,\\\n                shape (n, ).\n            - bboxes (Tensor): Bboxes and scores with shape (n, 5) of \\\n                positive region in binary mask, the last column is scores.\n            - mask_pred_binary (Tensor): Instance masks of \\\n                shape (n, h, w).\n        \"\"\"\n        max_per_image = self.test_cfg.get('max_per_image', 100)\n        num_queries = mask_cls.shape[0]\n        # shape (num_queries, num_class)\n        scores = F.softmax(mask_cls, dim=-1)[:, :-1]\n        # shape (num_queries * num_class, )\n        labels = torch.arange(self.num_classes, device=mask_cls.device).\\\n            unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)\n        scores_per_image, top_indices = scores.flatten(0, 1).topk(\n            max_per_image, sorted=False)\n        labels_per_image = labels[top_indices]\n\n        query_indices = top_indices // self.num_classes\n        mask_pred = mask_pred[query_indices]\n\n        # extract things\n        is_thing = labels_per_image < self.num_things_classes\n        scores_per_image = scores_per_image[is_thing]\n        labels_per_image = labels_per_image[is_thing]\n        mask_pred = mask_pred[is_thing]\n\n        mask_pred_binary = (mask_pred > 0).float()\n        mask_scores_per_image = (mask_pred.sigmoid() *\n                                 mask_pred_binary).flatten(1).sum(1) / (\n                                     mask_pred_binary.flatten(1).sum(1) + 1e-6)\n        det_scores = scores_per_image * mask_scores_per_image\n        mask_pred_binary = mask_pred_binary.bool()\n        bboxes = mask2bbox(mask_pred_binary)\n        bboxes = torch.cat([bboxes, det_scores[:, None]], dim=-1)\n\n        return labels_per_image, bboxes, mask_pred_binary\n\n    def simple_test(self,\n                    mask_cls_results,\n                    mask_pred_results,\n                    img_metas,\n                    rescale=False,\n                    **kwargs):\n        \"\"\"Test segment without test-time augmentation.\n\n        Only the output of the last decoder layer is used.\n\n        Args:\n            mask_cls_results (Tensor): Mask classification logits,\n                shape (batch_size, num_queries, cls_out_channels).\n                Note `cls_out_channels` should include background.\n            mask_pred_results (Tensor): Mask logits, shape\n                (batch_size, num_queries, h, w).\n            img_metas (list[dict]): List of image information.\n            rescale (bool, optional): If True, return boxes in\n                original image space. Default: False.\n\n        Returns:\n            list[dict[str, Tensor | tuple[Tensor]]]: Semantic segmentation \\\n                results and panoptic segmentation results for each \\\n                image.\n\n            .. code-block:: none\n\n                [\n                    {\n                        'pan_results': Tensor, # shape = [h, w]\n                        'ins_results': tuple[Tensor],\n                        # semantic segmentation results are not supported yet\n                        'sem_results': Tensor\n                    },\n                    ...\n                ]\n        \"\"\"\n        panoptic_on = self.test_cfg.get('panoptic_on', True)\n        semantic_on = self.test_cfg.get('semantic_on', False)\n        instance_on = self.test_cfg.get('instance_on', False)\n        assert not semantic_on, 'semantic segmentation '\\\n            'results are not supported yet.'\n\n        results = []\n        for mask_cls_result, mask_pred_result, meta in zip(\n                mask_cls_results, mask_pred_results, img_metas):\n            # remove padding\n            img_height, img_width = meta['img_shape'][:2]\n            mask_pred_result = mask_pred_result[:, :img_height, :img_width]\n\n            if rescale:\n                # return result in original resolution\n                ori_height, ori_width = meta['ori_shape'][:2]\n                mask_pred_result = F.interpolate(\n                    mask_pred_result[:, None],\n                    size=(ori_height, ori_width),\n                    mode='bilinear',\n                    align_corners=False)[:, 0]\n\n            result = dict()\n            if panoptic_on:\n                pan_results = self.panoptic_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['pan_results'] = pan_results\n\n            if instance_on:\n                ins_results = self.instance_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['ins_results'] = ins_results\n\n            if semantic_on:\n                sem_results = self.semantic_postprocess(\n                    mask_cls_result, mask_pred_result)\n                result['sem_results'] = sem_results\n\n            results.append(result)\n\n        return results\n"
  },
  {
    "path": "mmdet/models/utils/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d\nfrom .builder import build_linear_layer, build_transformer\nfrom .ckpt_convert import pvt_convert\nfrom .conv_upsample import ConvUpsample\nfrom .csp_layer import CSPLayer\nfrom .gaussian_target import gaussian_radius, gen_gaussian_target\nfrom .inverted_residual import InvertedResidual\nfrom .make_divisible import make_divisible\nfrom .misc import interpolate_as, sigmoid_geometric_mean\nfrom .normed_predictor import NormedConv2d, NormedLinear\nfrom .panoptic_gt_processing import preprocess_panoptic_gt\nfrom .point_sample import (get_uncertain_point_coords_with_randomness,\n                           get_uncertainty)\nfrom .positional_encoding import (LearnedPositionalEncoding,\n                                  SinePositionalEncoding)\nfrom .res_layer import ResLayer, SimplifiedBasicBlock\nfrom .se_layer import DyReLU, SELayer\nfrom .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,\n                          DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,\n                          nlc_to_nchw)\n\n__all__ = [\n    'ResLayer', 'gaussian_radius', 'gen_gaussian_target',\n    'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',\n    'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',\n    'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',\n    'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',\n    'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',\n    'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',\n    'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',\n    'preprocess_panoptic_gt', 'DyReLU',\n    'get_uncertain_point_coords_with_randomness', 'get_uncertainty'\n]\n"
  },
  {
    "path": "mmdet/models/utils/brick_wrappers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version\n\nif torch.__version__ == 'parrots':\n    TORCH_VERSION = torch.__version__\nelse:\n    # torch.__version__ could be 1.3.1+cu92, we only need the first two\n    # for comparison\n    TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])\n\n\ndef adaptive_avg_pool2d(input, output_size):\n    \"\"\"Handle empty batch dimension to adaptive_avg_pool2d.\n\n    Args:\n        input (tensor): 4D tensor.\n        output_size (int, tuple[int,int]): the target output size.\n    \"\"\"\n    if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):\n        if isinstance(output_size, int):\n            output_size = [output_size, output_size]\n        output_size = [*input.shape[:2], *output_size]\n        empty = NewEmptyTensorOp.apply(input, output_size)\n        return empty\n    else:\n        return F.adaptive_avg_pool2d(input, output_size)\n\n\nclass AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):\n    \"\"\"Handle empty batch dimension to AdaptiveAvgPool2d.\"\"\"\n\n    def forward(self, x):\n        # PyTorch 1.9 does not support empty tensor inference yet\n        if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):\n            output_size = self.output_size\n            if isinstance(output_size, int):\n                output_size = [output_size, output_size]\n            else:\n                output_size = [\n                    v if v is not None else d\n                    for v, d in zip(output_size,\n                                    x.size()[-2:])\n                ]\n            output_size = [*x.shape[:2], *output_size]\n            empty = NewEmptyTensorOp.apply(x, output_size)\n            return empty\n\n        return super().forward(x)\n"
  },
  {
    "path": "mmdet/models/utils/builder.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.utils import Registry, build_from_cfg\n\nTRANSFORMER = Registry('Transformer')\nLINEAR_LAYERS = Registry('linear layers')\n\n\ndef build_transformer(cfg, default_args=None):\n    \"\"\"Builder for Transformer.\"\"\"\n    return build_from_cfg(cfg, TRANSFORMER, default_args)\n\n\nLINEAR_LAYERS.register_module('Linear', module=nn.Linear)\n\n\ndef build_linear_layer(cfg, *args, **kwargs):\n    \"\"\"Build linear layer.\n    Args:\n        cfg (None or dict): The linear layer config, which should contain:\n            - type (str): Layer type.\n            - layer args: Args needed to instantiate an linear layer.\n        args (argument list): Arguments passed to the `__init__`\n            method of the corresponding linear layer.\n        kwargs (keyword arguments): Keyword arguments passed to the `__init__`\n            method of the corresponding linear layer.\n    Returns:\n        nn.Module: Created linear layer.\n    \"\"\"\n    if cfg is None:\n        cfg_ = dict(type='Linear')\n    else:\n        if not isinstance(cfg, dict):\n            raise TypeError('cfg must be a dict')\n        if 'type' not in cfg:\n            raise KeyError('the cfg dict must contain the key \"type\"')\n        cfg_ = cfg.copy()\n\n    layer_type = cfg_.pop('type')\n    if layer_type not in LINEAR_LAYERS:\n        raise KeyError(f'Unrecognized linear type {layer_type}')\n    else:\n        linear_layer = LINEAR_LAYERS.get(layer_type)\n\n    layer = linear_layer(*args, **kwargs, **cfg_)\n\n    return layer\n"
  },
  {
    "path": "mmdet/models/utils/ckpt_convert.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# This script consists of several convert functions which\n# can modify the weights of model in original repo to be\n# pre-trained weights.\n\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef pvt_convert(ckpt):\n    new_ckpt = OrderedDict()\n    # Process the concat between q linear weights and kv linear weights\n    use_abs_pos_embed = False\n    use_conv_ffn = False\n    for k in ckpt.keys():\n        if k.startswith('pos_embed'):\n            use_abs_pos_embed = True\n        if k.find('dwconv') >= 0:\n            use_conv_ffn = True\n    for k, v in ckpt.items():\n        if k.startswith('head'):\n            continue\n        if k.startswith('norm.'):\n            continue\n        if k.startswith('cls_token'):\n            continue\n        if k.startswith('pos_embed'):\n            stage_i = int(k.replace('pos_embed', ''))\n            new_k = k.replace(f'pos_embed{stage_i}',\n                              f'layers.{stage_i - 1}.1.0.pos_embed')\n            if stage_i == 4 and v.size(1) == 50:  # 1 (cls token) + 7 * 7\n                new_v = v[:, 1:, :]  # remove cls token\n            else:\n                new_v = v\n        elif k.startswith('patch_embed'):\n            stage_i = int(k.split('.')[0].replace('patch_embed', ''))\n            new_k = k.replace(f'patch_embed{stage_i}',\n                              f'layers.{stage_i - 1}.0')\n            new_v = v\n            if 'proj.' in new_k:\n                new_k = new_k.replace('proj.', 'projection.')\n        elif k.startswith('block'):\n            stage_i = int(k.split('.')[0].replace('block', ''))\n            layer_i = int(k.split('.')[1])\n            new_layer_i = layer_i + use_abs_pos_embed\n            new_k = k.replace(f'block{stage_i}.{layer_i}',\n                              f'layers.{stage_i - 1}.1.{new_layer_i}')\n            new_v = v\n            if 'attn.q.' in new_k:\n                sub_item_k = k.replace('q.', 'kv.')\n                new_k = new_k.replace('q.', 'attn.in_proj_')\n                new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)\n            elif 'attn.kv.' in new_k:\n                continue\n            elif 'attn.proj.' in new_k:\n                new_k = new_k.replace('proj.', 'attn.out_proj.')\n            elif 'attn.sr.' in new_k:\n                new_k = new_k.replace('sr.', 'sr.')\n            elif 'mlp.' 
in new_k:\n                string = f'{new_k}-'\n                new_k = new_k.replace('mlp.', 'ffn.layers.')\n                if 'fc1.weight' in new_k or 'fc2.weight' in new_k:\n                    new_v = v.reshape((*v.shape, 1, 1))\n                new_k = new_k.replace('fc1.', '0.')\n                new_k = new_k.replace('dwconv.dwconv.', '1.')\n                if use_conv_ffn:\n                    new_k = new_k.replace('fc2.', '4.')\n                else:\n                    new_k = new_k.replace('fc2.', '3.')\n                string += f'{new_k} {v.shape}-{new_v.shape}'\n        elif k.startswith('norm'):\n            stage_i = int(k[4])\n            new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')\n            new_v = v\n        else:\n            new_k = k\n            new_v = v\n        new_ckpt[new_k] = new_v\n\n    return new_ckpt\n\n\ndef swin_converter(ckpt):\n\n    new_ckpt = OrderedDict()\n\n    def correct_unfold_reduction_order(x):\n        out_channel, in_channel = x.shape\n        x = x.reshape(out_channel, 4, in_channel // 4)\n        x = x[:, [0, 2, 1, 3], :].transpose(1,\n                                            2).reshape(out_channel, in_channel)\n        return x\n\n    def correct_unfold_norm_order(x):\n        in_channel = x.shape[0]\n        x = x.reshape(4, in_channel // 4)\n        x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)\n        return x\n\n    for k, v in ckpt.items():\n        if k.startswith('head'):\n            continue\n        elif k.startswith('layers'):\n            new_v = v\n            if 'attn.' in k:\n                new_k = k.replace('attn.', 'attn.w_msa.')\n            elif 'mlp.' in k:\n                if 'mlp.fc1.' in k:\n                    new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')\n                elif 'mlp.fc2.' in k:\n                    new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')\n                else:\n                    new_k = k.replace('mlp.', 'ffn.')\n            elif 'downsample' in k:\n                new_k = k\n                if 'reduction.' in k:\n                    new_v = correct_unfold_reduction_order(v)\n                elif 'norm.' in k:\n                    new_v = correct_unfold_norm_order(v)\n            else:\n                new_k = k\n            new_k = new_k.replace('layers', 'stages', 1)\n        elif k.startswith('patch_embed'):\n            new_v = v\n            if 'proj' in k:\n                new_k = k.replace('proj', 'projection')\n            else:\n                new_k = k\n        else:\n            new_v = v\n            new_k = k\n\n        new_ckpt['backbone.' + new_k] = new_v\n\n    return new_ckpt\n"
  },
  {
    "path": "mmdet/models/utils/conv_upsample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn.functional as F\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule, ModuleList\n\n\nclass ConvUpsample(BaseModule):\n    \"\"\"ConvUpsample performs 2x upsampling after Conv.\n\n    There are several `ConvModule` layers. In the first few layers, upsampling\n    will be applied after each layer of convolution. The number of upsampling\n    must be no more than the number of ConvModule layers.\n\n    Args:\n        in_channels (int): Number of channels in the input feature map.\n        inner_channels (int): Number of channels produced by the convolution.\n        num_layers (int): Number of convolution layers.\n        num_upsample (int | optional): Number of upsampling layer. Must be no\n            more than num_layers. Upsampling will be applied after the first\n            ``num_upsample`` layers of convolution. Default: ``num_layers``.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer. Default: None.\n        init_cfg (dict): Config dict for initialization. Default: None.\n        kwargs (key word augments): Other augments used in ConvModule.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 inner_channels,\n                 num_layers=1,\n                 num_upsample=None,\n                 conv_cfg=None,\n                 norm_cfg=None,\n                 init_cfg=None,\n                 **kwargs):\n        super(ConvUpsample, self).__init__(init_cfg)\n        if num_upsample is None:\n            num_upsample = num_layers\n        assert num_upsample <= num_layers, \\\n            f'num_upsample({num_upsample})must be no more than ' \\\n            f'num_layers({num_layers})'\n        self.num_layers = num_layers\n        self.num_upsample = num_upsample\n        self.conv = ModuleList()\n        for i in range(num_layers):\n            self.conv.append(\n                ConvModule(\n                    in_channels,\n                    inner_channels,\n                    3,\n                    padding=1,\n                    stride=1,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n            in_channels = inner_channels\n\n    def forward(self, x):\n        num_upsample = self.num_upsample\n        for i in range(self.num_layers):\n            x = self.conv[i](x)\n            if num_upsample > 0:\n                num_upsample -= 1\n                x = F.interpolate(\n                    x, scale_factor=2, mode='bilinear', align_corners=False)\n        return x\n"
  },
  {
    "path": "mmdet/models/utils/csp_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, DepthwiseSeparableConvModule\nfrom mmcv.runner import BaseModule\n\n\nclass DarknetBottleneck(BaseModule):\n    \"\"\"The basic bottleneck block used in Darknet.\n\n    Each ResBlock consists of two ConvModules and the input is added to the\n    final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.\n    The first convLayer has filter size of 1x1 and the second one has the\n    filter size of 3x3.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        expansion (int): The kernel size of the convolution. Default: 0.5\n        add_identity (bool): Whether to add identity to the out.\n            Default: True\n        use_depthwise (bool): Whether to use depthwise separable convolution.\n            Default: False\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish').\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 expansion=0.5,\n                 add_identity=True,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n        hidden_channels = int(out_channels * expansion)\n        conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule\n        self.conv1 = ConvModule(\n            in_channels,\n            hidden_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.conv2 = conv(\n            hidden_channels,\n            out_channels,\n            3,\n            stride=1,\n            padding=1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.add_identity = \\\n            add_identity and in_channels == out_channels\n\n    def forward(self, x):\n        identity = x\n        out = self.conv1(x)\n        out = self.conv2(out)\n\n        if self.add_identity:\n            return out + identity\n        else:\n            return out\n\n\nclass CSPLayer(BaseModule):\n    \"\"\"Cross Stage Partial Layer.\n\n    Args:\n        in_channels (int): The input channels of the CSP layer.\n        out_channels (int): The output channels of the CSP layer.\n        expand_ratio (float): Ratio to adjust the number of channels of the\n            hidden layer. Default: 0.5\n        num_blocks (int): Number of blocks. Default: 1\n        add_identity (bool): Whether to add identity in blocks.\n            Default: True\n        use_depthwise (bool): Whether to depthwise separable convolution in\n            blocks. 
Default: False\n        conv_cfg (dict, optional): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN')\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='Swish')\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 expand_ratio=0.5,\n                 num_blocks=1,\n                 add_identity=True,\n                 use_depthwise=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),\n                 act_cfg=dict(type='Swish'),\n                 init_cfg=None):\n        super().__init__(init_cfg)\n        mid_channels = int(out_channels * expand_ratio)\n        self.main_conv = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.short_conv = ConvModule(\n            in_channels,\n            mid_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n        self.final_conv = ConvModule(\n            2 * mid_channels,\n            out_channels,\n            1,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        self.blocks = nn.Sequential(*[\n            DarknetBottleneck(\n                mid_channels,\n                mid_channels,\n                1.0,\n                add_identity,\n                use_depthwise,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg) for _ in range(num_blocks)\n        ])\n\n    def forward(self, x):\n        x_short = self.short_conv(x)\n\n        x_main = self.main_conv(x)\n        x_main = self.blocks(x_main)\n\n        x_final = torch.cat((x_main, x_short), dim=1)\n        return self.final_conv(x_final)\n"
  },
  {
    "path": "mmdet/models/utils/gaussian_target.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom math import sqrt\n\nimport torch\nimport torch.nn.functional as F\n\n\ndef gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):\n    \"\"\"Generate 2D gaussian kernel.\n\n    Args:\n        radius (int): Radius of gaussian kernel.\n        sigma (int): Sigma of gaussian function. Default: 1.\n        dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.\n        device (str): Device of gaussian tensor. Default: 'cpu'.\n\n    Returns:\n        h (Tensor): Gaussian kernel with a\n            ``(2 * radius + 1) * (2 * radius + 1)`` shape.\n    \"\"\"\n    x = torch.arange(\n        -radius, radius + 1, dtype=dtype, device=device).view(1, -1)\n    y = torch.arange(\n        -radius, radius + 1, dtype=dtype, device=device).view(-1, 1)\n\n    h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()\n\n    h[h < torch.finfo(h.dtype).eps * h.max()] = 0\n    return h\n\n\ndef gen_gaussian_target(heatmap, center, radius, k=1):\n    \"\"\"Generate 2D gaussian heatmap.\n\n    Args:\n        heatmap (Tensor): Input heatmap, the gaussian kernel will cover on\n            it and maintain the max value.\n        center (list[int]): Coord of gaussian kernel's center.\n        radius (int): Radius of gaussian kernel.\n        k (int): Coefficient of gaussian kernel. Default: 1.\n\n    Returns:\n        out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.\n    \"\"\"\n    diameter = 2 * radius + 1\n    gaussian_kernel = gaussian2D(\n        radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)\n\n    x, y = center\n\n    height, width = heatmap.shape[:2]\n\n    left, right = min(x, radius), min(width - x, radius + 1)\n    top, bottom = min(y, radius), min(height - y, radius + 1)\n\n    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]\n    masked_gaussian = gaussian_kernel[radius - top:radius + bottom,\n                                      radius - left:radius + right]\n    out_heatmap = heatmap\n    torch.max(\n        masked_heatmap,\n        masked_gaussian * k,\n        out=out_heatmap[y - top:y + bottom, x - left:x + right])\n\n    return out_heatmap\n\n\ndef gaussian_radius(det_size, min_overlap):\n    r\"\"\"Generate 2D gaussian radius.\n\n    This function is modified from the `official github repo\n    <https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/\n    utils.py#L65>`_.\n\n    Given ``min_overlap``, radius could computed by a quadratic equation\n    according to Vieta's formulas.\n\n    There are 3 cases for computing gaussian radius, details are following:\n\n    - Explanation of figure: ``lt`` and ``br`` indicates the left-top and\n      bottom-right corner of ground truth box. ``x`` indicates the\n      generated corner at the limited position when ``radius=r``.\n\n    - Case1: one corner is inside the gt box and the other is outside.\n\n    .. code:: text\n\n        |<   width   >|\n\n        lt-+----------+         -\n        |  |          |         ^\n        +--x----------+--+\n        |  |          |  |\n        |  |          |  |    height\n        |  | overlap  |  |\n        |  |          |  |\n        |  |          |  |      v\n        +--+---------br--+      -\n           |          |  |\n           +----------+--x\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. 
math::\n        \\cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \\ge {iou} \\quad\\Rightarrow\\quad\n        {r^2-(w+h)r+\\cfrac{1-iou}{1+iou}*w*h} \\ge 0 \\\\\n        {a} = 1,\\quad{b} = {-(w+h)},\\quad{c} = {\\cfrac{1-iou}{1+iou}*w*h} \\\\\n        {r} \\le \\cfrac{-b-\\sqrt{b^2-4*a*c}}{2*a}\n\n    - Case2: both two corners are inside the gt box.\n\n    .. code:: text\n\n        |<   width   >|\n\n        lt-+----------+         -\n        |  |          |         ^\n        +--x-------+  |\n        |  |       |  |\n        |  |overlap|  |       height\n        |  |       |  |\n        |  +-------x--+\n        |          |  |         v\n        +----------+-br         -\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. math::\n        \\cfrac{(w-2*r)*(h-2*r)}{w*h} \\ge {iou} \\quad\\Rightarrow\\quad\n        {4r^2-2(w+h)r+(1-iou)*w*h} \\ge 0 \\\\\n        {a} = 4,\\quad {b} = {-2(w+h)},\\quad {c} = {(1-iou)*w*h} \\\\\n        {r} \\le \\cfrac{-b-\\sqrt{b^2-4*a*c}}{2*a}\n\n    - Case3: both two corners are outside the gt box.\n\n    .. code:: text\n\n           |<   width   >|\n\n        x--+----------------+\n        |  |                |\n        +-lt-------------+  |   -\n        |  |             |  |   ^\n        |  |             |  |\n        |  |   overlap   |  | height\n        |  |             |  |\n        |  |             |  |   v\n        |  +------------br--+   -\n        |                |  |\n        +----------------+--x\n\n    To ensure IoU of generated box and gt box is larger than ``min_overlap``:\n\n    .. math::\n        \\cfrac{w*h}{(w+2*r)*(h+2*r)} \\ge {iou} \\quad\\Rightarrow\\quad\n        {4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \\le 0 \\\\\n        {a} = {4*iou},\\quad {b} = {2*iou*(w+h)},\\quad {c} = {(iou-1)*w*h} \\\\\n        {r} \\le \\cfrac{-b+\\sqrt{b^2-4*a*c}}{2*a}\n\n    Args:\n        det_size (list[int]): Shape of object.\n        min_overlap (float): Min IoU with ground truth for boxes generated by\n            keypoints inside the gaussian kernel.\n\n    Returns:\n        radius (int): Radius of gaussian kernel.\n    \"\"\"\n    height, width = det_size\n\n    a1 = 1\n    b1 = (height + width)\n    c1 = width * height * (1 - min_overlap) / (1 + min_overlap)\n    sq1 = sqrt(b1**2 - 4 * a1 * c1)\n    r1 = (b1 - sq1) / (2 * a1)\n\n    a2 = 4\n    b2 = 2 * (height + width)\n    c2 = (1 - min_overlap) * width * height\n    sq2 = sqrt(b2**2 - 4 * a2 * c2)\n    r2 = (b2 - sq2) / (2 * a2)\n\n    a3 = 4 * min_overlap\n    b3 = -2 * min_overlap * (height + width)\n    c3 = (min_overlap - 1) * width * height\n    sq3 = sqrt(b3**2 - 4 * a3 * c3)\n    r3 = (b3 + sq3) / (2 * a3)\n    return min(r1, r2, r3)\n\n\ndef get_local_maximum(heat, kernel=3):\n    \"\"\"Extract local maximum pixel with given kernel.\n\n    Args:\n        heat (Tensor): Target heatmap.\n        kernel (int): Kernel size of max pooling. Default: 3.\n\n    Returns:\n        heat (Tensor): A heatmap where local maximum pixels maintain its\n            own value and other positions are 0.\n    \"\"\"\n    pad = (kernel - 1) // 2\n    hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)\n    keep = (hmax == heat).float()\n    return heat * keep\n\n\ndef get_topk_from_heatmap(scores, k=20):\n    \"\"\"Get top k positions from heatmap.\n\n    Args:\n        scores (Tensor): Target heatmap with shape\n            [batch, num_classes, height, width].\n        k (int): Target number. 
Default: 20.\n\n    Returns:\n        tuple[torch.Tensor]: Scores, indexes, categories and coords of\n            topk keypoint. Containing following Tensors:\n\n        - topk_scores (Tensor): Max scores of each topk keypoint.\n        - topk_inds (Tensor): Indexes of each topk keypoint.\n        - topk_clses (Tensor): Categories of each topk keypoint.\n        - topk_ys (Tensor): Y-coord of each topk keypoint.\n        - topk_xs (Tensor): X-coord of each topk keypoint.\n    \"\"\"\n    batch, _, height, width = scores.size()\n    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)\n    topk_clses = topk_inds // (height * width)\n    topk_inds = topk_inds % (height * width)\n    topk_ys = topk_inds // width\n    topk_xs = (topk_inds % width).int().float()\n    return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs\n\n\ndef gather_feat(feat, ind, mask=None):\n    \"\"\"Gather feature according to index.\n\n    Args:\n        feat (Tensor): Target feature map.\n        ind (Tensor): Target coord index.\n        mask (Tensor | None): Mask of feature map. Default: None.\n\n    Returns:\n        feat (Tensor): Gathered feature.\n    \"\"\"\n    dim = feat.size(2)\n    ind = ind.unsqueeze(2).repeat(1, 1, dim)\n    feat = feat.gather(1, ind)\n    if mask is not None:\n        mask = mask.unsqueeze(2).expand_as(feat)\n        feat = feat[mask]\n        feat = feat.view(-1, dim)\n    return feat\n\n\ndef transpose_and_gather_feat(feat, ind):\n    \"\"\"Transpose and gather feature according to index.\n\n    Args:\n        feat (Tensor): Target feature map.\n        ind (Tensor): Target coord index.\n\n    Returns:\n        feat (Tensor): Transposed and gathered feature.\n    \"\"\"\n    feat = feat.permute(0, 2, 3, 1).contiguous()\n    feat = feat.view(feat.size(0), -1, feat.size(3))\n    feat = gather_feat(feat, ind)\n    return feat\n"
  },
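A short usage sketch of the heatmap helpers above; the import path mirrors the file location and the box size, overlap threshold and centre are illustrative values.

```python
import torch
from mmdet.models.utils.gaussian_target import (gaussian_radius,
                                                gen_gaussian_target)

heatmap = torch.zeros(128, 128)        # single-class heatmap of shape (H, W)
box_h, box_w = 24, 40                  # ground-truth box size in pixels
radius = max(0, int(gaussian_radius((box_h, box_w), min_overlap=0.7)))
heatmap = gen_gaussian_target(heatmap, center=[64, 64], radius=radius)
assert heatmap[64, 64] == 1.0          # the kernel peak sits at the centre
```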
  {
    "path": "mmdet/models/utils/inverted_residual.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn import ConvModule\nfrom mmcv.cnn.bricks import DropPath\nfrom mmcv.runner import BaseModule\n\nfrom .se_layer import SELayer\n\n\nclass InvertedResidual(BaseModule):\n    \"\"\"Inverted Residual Block.\n\n    Args:\n        in_channels (int): The input channels of this Module.\n        out_channels (int): The output channels of this Module.\n        mid_channels (int): The input channels of the depthwise convolution.\n        kernel_size (int): The kernel size of the depthwise convolution.\n            Default: 3.\n        stride (int): The stride of the depthwise convolution. Default: 1.\n        se_cfg (dict): Config dict for se layer. Default: None, which means no\n            se layer.\n        with_expand_conv (bool): Use expand conv or not. If set False,\n            mid_channels must be the same with in_channels.\n            Default: True.\n        conv_cfg (dict): Config dict for convolution layer. Default: None,\n            which means using conv2d.\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: dict(type='BN').\n        act_cfg (dict): Config dict for activation layer.\n            Default: dict(type='ReLU').\n        drop_path_rate (float): stochastic depth rate. Defaults to 0.\n        with_cp (bool): Use checkpoint or not. Using checkpoint will save some\n            memory while slowing down the training speed. Default: False.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n\n    Returns:\n        Tensor: The output tensor.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 mid_channels,\n                 kernel_size=3,\n                 stride=1,\n                 se_cfg=None,\n                 with_expand_conv=True,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 act_cfg=dict(type='ReLU'),\n                 drop_path_rate=0.,\n                 with_cp=False,\n                 init_cfg=None):\n        super(InvertedResidual, self).__init__(init_cfg)\n        self.with_res_shortcut = (stride == 1 and in_channels == out_channels)\n        assert stride in [1, 2], f'stride must in [1, 2]. 
' \\\n            f'But received {stride}.'\n        self.with_cp = with_cp\n        self.drop_path = DropPath(\n            drop_path_rate) if drop_path_rate > 0 else nn.Identity()\n        self.with_se = se_cfg is not None\n        self.with_expand_conv = with_expand_conv\n\n        if self.with_se:\n            assert isinstance(se_cfg, dict)\n        if not self.with_expand_conv:\n            assert mid_channels == in_channels\n\n        if self.with_expand_conv:\n            self.expand_conv = ConvModule(\n                in_channels=in_channels,\n                out_channels=mid_channels,\n                kernel_size=1,\n                stride=1,\n                padding=0,\n                conv_cfg=conv_cfg,\n                norm_cfg=norm_cfg,\n                act_cfg=act_cfg)\n        self.depthwise_conv = ConvModule(\n            in_channels=mid_channels,\n            out_channels=mid_channels,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=kernel_size // 2,\n            groups=mid_channels,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=act_cfg)\n\n        if self.with_se:\n            self.se = SELayer(**se_cfg)\n\n        self.linear_conv = ConvModule(\n            in_channels=mid_channels,\n            out_channels=out_channels,\n            kernel_size=1,\n            stride=1,\n            padding=0,\n            conv_cfg=conv_cfg,\n            norm_cfg=norm_cfg,\n            act_cfg=None)\n\n    def forward(self, x):\n\n        def _inner_forward(x):\n            out = x\n\n            if self.with_expand_conv:\n                out = self.expand_conv(out)\n\n            out = self.depthwise_conv(out)\n\n            if self.with_se:\n                out = self.se(out)\n\n            out = self.linear_conv(out)\n\n            if self.with_res_shortcut:\n                return x + self.drop_path(out)\n            else:\n                return out\n\n        if self.with_cp and x.requires_grad:\n            out = cp.checkpoint(_inner_forward, x)\n        else:\n            out = _inner_forward(x)\n\n        return out\n"
  },
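A construction sketch for the block above, assuming mmdet and its mmcv dependency are installed; the `se_cfg` keys are forwarded to `SELayer` from `se_layer.py`, and the channel sizes are illustrative.

```python
import torch
from mmdet.models.utils.inverted_residual import InvertedResidual

block = InvertedResidual(
    in_channels=32,
    out_channels=32,
    mid_channels=96,                    # expand ratio of 3
    kernel_size=3,
    stride=1,
    se_cfg=dict(channels=96, ratio=4))  # kwargs of SELayer
x = torch.rand(2, 32, 56, 56)
out = block(x)
# stride == 1 and in_channels == out_channels, so the residual shortcut is used
assert out.shape == x.shape
```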
  {
    "path": "mmdet/models/utils/make_divisible.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\ndef make_divisible(value, divisor, min_value=None, min_ratio=0.9):\n    \"\"\"Make divisible function.\n\n    This function rounds the channel number to the nearest value that can be\n    divisible by the divisor. It is taken from the original tf repo. It ensures\n    that all layers have a channel number that is divisible by divisor. It can\n    be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py  # noqa\n\n    Args:\n        value (int): The original channel number.\n        divisor (int): The divisor to fully divide the channel number.\n        min_value (int): The minimum value of the output channel.\n            Default: None, means that the minimum value equal to the divisor.\n        min_ratio (float): The minimum ratio of the rounded channel number to\n            the original channel number. Default: 0.9.\n\n    Returns:\n        int: The modified output channel number.\n    \"\"\"\n\n    if min_value is None:\n        min_value = divisor\n    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)\n    # Make sure that round down does not go down by more than (1-min_ratio).\n    if new_value < min_ratio * value:\n        new_value += divisor\n    return new_value\n"
  },
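Two worked examples of the rounding rule above (plain Python; only the import path is an assumption).

```python
from mmdet.models.utils.make_divisible import make_divisible

# 30 is rounded to the nearest multiple of 8.
assert make_divisible(30, 8) == 32
# 110 would round to 96, but 96 < 0.9 * 110, so one extra divisor is added.
assert make_divisible(110, 32) == 128
```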
  {
    "path": "mmdet/models/utils/misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom torch.autograd import Function\nfrom torch.nn import functional as F\n\n\nclass SigmoidGeometricMean(Function):\n    \"\"\"Forward and backward function of geometric mean of two sigmoid\n    functions.\n\n    This implementation with analytical gradient function substitutes\n    the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The\n    original implementation incurs none during gradient backprapagation\n    if both x and y are very small values.\n    \"\"\"\n\n    @staticmethod\n    def forward(ctx, x, y):\n        x_sigmoid = x.sigmoid()\n        y_sigmoid = y.sigmoid()\n        z = (x_sigmoid * y_sigmoid).sqrt()\n        ctx.save_for_backward(x_sigmoid, y_sigmoid, z)\n        return z\n\n    @staticmethod\n    def backward(ctx, grad_output):\n        x_sigmoid, y_sigmoid, z = ctx.saved_tensors\n        grad_x = grad_output * z * (1 - x_sigmoid) / 2\n        grad_y = grad_output * z * (1 - y_sigmoid) / 2\n        return grad_x, grad_y\n\n\nsigmoid_geometric_mean = SigmoidGeometricMean.apply\n\n\ndef interpolate_as(source, target, mode='bilinear', align_corners=False):\n    \"\"\"Interpolate the `source` to the shape of the `target`.\n\n    The `source` must be a Tensor, but the `target` can be a Tensor or a\n    np.ndarray with the shape (..., target_h, target_w).\n\n    Args:\n        source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or\n            (N, C, H, W).\n        target (Tensor | np.ndarray): The interpolation target with the shape\n            (..., target_h, target_w).\n        mode (str): Algorithm used for interpolation. The options are the\n            same as those in F.interpolate(). Default: ``'bilinear'``.\n        align_corners (bool): The same as the argument in F.interpolate().\n\n    Returns:\n        Tensor: The interpolated source Tensor.\n    \"\"\"\n    assert len(target.shape) >= 2\n\n    def _interpolate_as(source, target, mode='bilinear', align_corners=False):\n        \"\"\"Interpolate the `source` (4D) to the shape of the `target`.\"\"\"\n        target_h, target_w = target.shape[-2:]\n        source_h, source_w = source.shape[-2:]\n        if target_h != source_h or target_w != source_w:\n            source = F.interpolate(\n                source,\n                size=(target_h, target_w),\n                mode=mode,\n                align_corners=align_corners)\n        return source\n\n    if len(source.shape) == 3:\n        source = source[:, None, :, :]\n        source = _interpolate_as(source, target, mode, align_corners)\n        return source[:, 0, :, :]\n    else:\n        return _interpolate_as(source, target, mode, align_corners)\n"
  },
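A usage sketch for `interpolate_as` (import path assumed); only the last two dimensions of the target contribute to the output size.

```python
import torch
from mmdet.models.utils.misc import interpolate_as

logits = torch.rand(2, 19, 32, 32)   # coarse per-class logits
target = torch.rand(2, 128, 128)     # only its (H, W) = (128, 128) is used
resized = interpolate_as(logits, target)
assert resized.shape == (2, 19, 128, 128)
```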
  {
    "path": "mmdet/models/utils/normed_predictor.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import CONV_LAYERS\n\nfrom .builder import LINEAR_LAYERS\n\n\n@LINEAR_LAYERS.register_module(name='NormedLinear')\nclass NormedLinear(nn.Linear):\n    \"\"\"Normalized Linear Layer.\n\n    Args:\n        tempeature (float, optional): Tempeature term. Default to 20.\n        power (int, optional): Power term. Default to 1.0.\n        eps (float, optional): The minimal value of divisor to\n             keep numerical stability. Default to 1e-6.\n    \"\"\"\n\n    def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):\n        super(NormedLinear, self).__init__(*args, **kwargs)\n        self.tempearture = tempearture\n        self.power = power\n        self.eps = eps\n        self.init_weights()\n\n    def init_weights(self):\n        nn.init.normal_(self.weight, mean=0, std=0.01)\n        if self.bias is not None:\n            nn.init.constant_(self.bias, 0)\n\n    def forward(self, x):\n        weight_ = self.weight / (\n            self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x_ * self.tempearture\n\n        return F.linear(x_, weight_, self.bias)\n\n\n@CONV_LAYERS.register_module(name='NormedConv2d')\nclass NormedConv2d(nn.Conv2d):\n    \"\"\"Normalized Conv2d Layer.\n\n    Args:\n        tempeature (float, optional): Tempeature term. Default to 20.\n        power (int, optional): Power term. Default to 1.0.\n        eps (float, optional): The minimal value of divisor to\n             keep numerical stability. Default to 1e-6.\n        norm_over_kernel (bool, optional): Normalize over kernel.\n             Default to False.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 tempearture=20,\n                 power=1.0,\n                 eps=1e-6,\n                 norm_over_kernel=False,\n                 **kwargs):\n        super(NormedConv2d, self).__init__(*args, **kwargs)\n        self.tempearture = tempearture\n        self.power = power\n        self.norm_over_kernel = norm_over_kernel\n        self.eps = eps\n\n    def forward(self, x):\n        if not self.norm_over_kernel:\n            weight_ = self.weight / (\n                self.weight.norm(dim=1, keepdim=True).pow(self.power) +\n                self.eps)\n        else:\n            weight_ = self.weight / (\n                self.weight.view(self.weight.size(0), -1).norm(\n                    dim=1, keepdim=True).pow(self.power)[..., None, None] +\n                self.eps)\n        x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)\n        x_ = x_ * self.tempearture\n\n        if hasattr(self, 'conv2d_forward'):\n            x_ = self.conv2d_forward(x_, weight_)\n        else:\n            if torch.__version__ >= '1.8':\n                x_ = self._conv_forward(x_, weight_, self.bias)\n            else:\n                x_ = self._conv_forward(x_, weight_)\n        return x_\n"
  },
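A direct-construction sketch for the normalized predictors above. In configs they are usually referenced through their registry names ('NormedLinear', 'NormedConv2d'); here the classes are instantiated directly and the import path is assumed.

```python
import torch
from mmdet.models.utils.normed_predictor import NormedConv2d, NormedLinear

cls_fc = NormedLinear(256, 80)                    # cosine-style linear head
assert cls_fc(torch.rand(4, 256)).shape == (4, 80)

cls_conv = NormedConv2d(256, 80, kernel_size=3, padding=1)
assert cls_conv(torch.rand(2, 256, 16, 16)).shape == (2, 80, 16, 16)
```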
  {
    "path": "mmdet/models/utils/panoptic_gt_processing.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,\n                           num_stuff, img_metas):\n    \"\"\"Preprocess the ground truth for a image.\n\n    Args:\n        gt_labels (Tensor): Ground truth labels of each bbox,\n            with shape (num_gts, ).\n        gt_masks (BitmapMasks): Ground truth masks of each instances\n            of a image, shape (num_gts, h, w).\n        gt_semantic_seg (Tensor | None): Ground truth of semantic\n            segmentation with the shape (1, h, w).\n            [0, num_thing_class - 1] means things,\n            [num_thing_class, num_class-1] means stuff,\n            255 means VOID. It's None when training instance segmentation.\n        img_metas (dict): List of image meta information.\n\n    Returns:\n        tuple: a tuple containing the following targets.\n\n            - labels (Tensor): Ground truth class indices for a\n                image, with shape (n, ), n is the sum of number\n                of stuff type and number of instance in a image.\n            - masks (Tensor): Ground truth mask for a image, with\n                shape (n, h, w). Contains stuff and things when training\n                panoptic segmentation, and things only when training\n                instance segmentation.\n    \"\"\"\n    num_classes = num_things + num_stuff\n\n    things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\\\n        .to_tensor(dtype=torch.bool, device=gt_labels.device)\n\n    if gt_semantic_seg is None:\n        masks = things_masks.long()\n        return gt_labels, masks\n\n    things_labels = gt_labels\n    gt_semantic_seg = gt_semantic_seg.squeeze(0)\n\n    semantic_labels = torch.unique(\n        gt_semantic_seg,\n        sorted=False,\n        return_inverse=False,\n        return_counts=False)\n    stuff_masks_list = []\n    stuff_labels_list = []\n    for label in semantic_labels:\n        if label < num_things or label >= num_classes:\n            continue\n        stuff_mask = gt_semantic_seg == label\n        stuff_masks_list.append(stuff_mask)\n        stuff_labels_list.append(label)\n\n    if len(stuff_masks_list) > 0:\n        stuff_masks = torch.stack(stuff_masks_list, dim=0)\n        stuff_labels = torch.stack(stuff_labels_list, dim=0)\n        labels = torch.cat([things_labels, stuff_labels], dim=0)\n        masks = torch.cat([things_masks, stuff_masks], dim=0)\n    else:\n        labels = things_labels\n        masks = things_masks\n\n    masks = masks.long()\n    return labels, masks\n"
  },
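A minimal sketch of the expected inputs for `preprocess_panoptic_gt`, assuming `BitmapMasks` is available from `mmdet.core.mask`; the label values, shapes and the COCO-style 80/53 thing/stuff split are illustrative only.

```python
import numpy as np
import torch
from mmdet.core.mask import BitmapMasks
from mmdet.models.utils.panoptic_gt_processing import preprocess_panoptic_gt

num_things, num_stuff = 80, 53
gt_labels = torch.tensor([0, 15])                   # two thing instances
gt_masks = BitmapMasks(
    (np.random.rand(2, 64, 64) > 0.5).astype(np.uint8), 64, 64)
gt_semantic_seg = torch.full((1, 64, 64), 255, dtype=torch.long)
gt_semantic_seg[0, :16] = 90                        # one stuff region
img_metas = dict(pad_shape=(64, 64, 3))

labels, masks = preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg,
                                        num_things, num_stuff, img_metas)
# two thing instances + one stuff class -> three targets
assert labels.shape == (3,) and masks.shape == (3, 64, 64)
```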
  {
    "path": "mmdet/models/utils/point_sample.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import point_sample\n\n\ndef get_uncertainty(mask_pred, labels):\n    \"\"\"Estimate uncertainty based on pred logits.\n\n    We estimate uncertainty as L1 distance between 0.0 and the logits\n    prediction in 'mask_pred' for the foreground class in `classes`.\n\n    Args:\n        mask_pred (Tensor): mask predication logits, shape (num_rois,\n            num_classes, mask_height, mask_width).\n\n        labels (list[Tensor]): Either predicted or ground truth label for\n            each predicted mask, of length num_rois.\n\n    Returns:\n        scores (Tensor): Uncertainty scores with the most uncertain\n            locations having the highest uncertainty score,\n            shape (num_rois, 1, mask_height, mask_width)\n    \"\"\"\n    if mask_pred.shape[1] == 1:\n        gt_class_logits = mask_pred.clone()\n    else:\n        inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)\n        gt_class_logits = mask_pred[inds, labels].unsqueeze(1)\n    return -torch.abs(gt_class_logits)\n\n\ndef get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points,\n                                               oversample_ratio,\n                                               importance_sample_ratio):\n    \"\"\"Get ``num_points`` most uncertain points with random points during\n    train.\n\n    Sample points in [0, 1] x [0, 1] coordinate space based on their\n    uncertainty. The uncertainties are calculated for each point using\n    'get_uncertainty()' function that takes point's logit prediction as\n    input.\n\n    Args:\n        mask_pred (Tensor): A tensor of shape (num_rois, num_classes,\n            mask_height, mask_width) for class-specific or class-agnostic\n            prediction.\n        labels (list): The ground truth class for each instance.\n        num_points (int): The number of points to sample.\n        oversample_ratio (int): Oversampling parameter.\n        importance_sample_ratio (float): Ratio of points that are sampled\n            via importnace sampling.\n\n    Returns:\n        point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)\n            that contains the coordinates sampled points.\n    \"\"\"\n    assert oversample_ratio >= 1\n    assert 0 <= importance_sample_ratio <= 1\n    batch_size = mask_pred.shape[0]\n    num_sampled = int(num_points * oversample_ratio)\n    point_coords = torch.rand(\n        batch_size, num_sampled, 2, device=mask_pred.device)\n    point_logits = point_sample(mask_pred, point_coords)\n    # It is crucial to calculate uncertainty based on the sampled\n    # prediction value for the points. Calculating uncertainties of the\n    # coarse predictions first and sampling them for points leads to\n    # incorrect results.  To illustrate this: assume uncertainty func(\n    # logits)=-abs(logits), a sampled point between two coarse\n    # predictions with -1 and 1 logits has 0 logits, and therefore 0\n    # uncertainty value. 
However, if we calculate uncertainties for the\n    # coarse predictions first, both will have -1 uncertainty,\n    # and sampled point will get -1 uncertainty.\n    point_uncertainties = get_uncertainty(point_logits, labels)\n    num_uncertain_points = int(importance_sample_ratio * num_points)\n    num_random_points = num_points - num_uncertain_points\n    idx = torch.topk(\n        point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]\n    shift = num_sampled * torch.arange(\n        batch_size, dtype=torch.long, device=mask_pred.device)\n    idx += shift[:, None]\n    point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(\n        batch_size, num_uncertain_points, 2)\n    if num_random_points > 0:\n        rand_roi_coords = torch.rand(\n            batch_size, num_random_points, 2, device=mask_pred.device)\n        point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)\n    return point_coords\n"
  },
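A usage sketch for the point-sampling helper above; `mmcv.ops.point_sample` runs on CPU as well, and the shapes are illustrative.

```python
import torch
from mmdet.models.utils.point_sample import \
    get_uncertain_point_coords_with_randomness

mask_pred = torch.randn(4, 1, 28, 28)     # class-agnostic mask logits
labels = torch.zeros(4, dtype=torch.long)
coords = get_uncertain_point_coords_with_randomness(
    mask_pred, labels, num_points=49, oversample_ratio=3,
    importance_sample_ratio=0.75)
assert coords.shape == (4, 49, 2)         # normalized (x, y) in [0, 1]
```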
  {
    "path": "mmdet/models/utils/positional_encoding.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING\nfrom mmcv.runner import BaseModule\n\n\n@POSITIONAL_ENCODING.register_module()\nclass SinePositionalEncoding(BaseModule):\n    \"\"\"Position encoding with sine and cosine functions.\n\n    See `End-to-End Object Detection with Transformers\n    <https://arxiv.org/pdf/2005.12872>`_ for details.\n\n    Args:\n        num_feats (int): The feature dimension for each position\n            along x-axis or y-axis. Note the final returned dimension\n            for each position is 2 times of this value.\n        temperature (int, optional): The temperature used for scaling\n            the position embedding. Defaults to 10000.\n        normalize (bool, optional): Whether to normalize the position\n            embedding. Defaults to False.\n        scale (float, optional): A scale factor that scales the position\n            embedding. The scale will be used only when `normalize` is True.\n            Defaults to 2*pi.\n        eps (float, optional): A value added to the denominator for\n            numerical stability. Defaults to 1e-6.\n        offset (float): offset add to embed when do the normalization.\n            Defaults to 0.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 num_feats,\n                 temperature=10000,\n                 normalize=False,\n                 scale=2 * math.pi,\n                 eps=1e-6,\n                 offset=0.,\n                 init_cfg=None):\n        super(SinePositionalEncoding, self).__init__(init_cfg)\n        if normalize:\n            assert isinstance(scale, (float, int)), 'when normalize is set,' \\\n                'scale should be provided and in float or int type, ' \\\n                f'found {type(scale)}'\n        self.num_feats = num_feats\n        self.temperature = temperature\n        self.normalize = normalize\n        self.scale = scale\n        self.eps = eps\n        self.offset = offset\n\n    def forward(self, mask):\n        \"\"\"Forward function for `SinePositionalEncoding`.\n\n        Args:\n            mask (Tensor): ByteTensor mask. Non-zero values representing\n                ignored positions, while zero values means valid positions\n                for this image. 
Shape [bs, h, w].\n\n        Returns:\n            pos (Tensor): Returned position embedding with shape\n                [bs, num_feats*2, h, w].\n        \"\"\"\n        # For convenience of exporting to ONNX, it's required to convert\n        # `masks` from bool to int.\n        mask = mask.to(torch.int)\n        not_mask = 1 - mask  # logical_not\n        y_embed = not_mask.cumsum(1, dtype=torch.float32)\n        x_embed = not_mask.cumsum(2, dtype=torch.float32)\n        if self.normalize:\n            y_embed = (y_embed + self.offset) / \\\n                      (y_embed[:, -1:, :] + self.eps) * self.scale\n            x_embed = (x_embed + self.offset) / \\\n                      (x_embed[:, :, -1:] + self.eps) * self.scale\n        dim_t = torch.arange(\n            self.num_feats, dtype=torch.float32, device=mask.device)\n        dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)\n        pos_x = x_embed[:, :, :, None] / dim_t\n        pos_y = y_embed[:, :, :, None] / dim_t\n        # use `view` instead of `flatten` for dynamically exporting to ONNX\n        B, H, W = mask.size()\n        pos_x = torch.stack(\n            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),\n            dim=4).view(B, H, W, -1)\n        pos_y = torch.stack(\n            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),\n            dim=4).view(B, H, W, -1)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n        return pos\n\n    def __repr__(self):\n        \"\"\"str: a string that describes the module\"\"\"\n        repr_str = self.__class__.__name__\n        repr_str += f'(num_feats={self.num_feats}, '\n        repr_str += f'temperature={self.temperature}, '\n        repr_str += f'normalize={self.normalize}, '\n        repr_str += f'scale={self.scale}, '\n        repr_str += f'eps={self.eps})'\n        return repr_str\n\n\n@POSITIONAL_ENCODING.register_module()\nclass LearnedPositionalEncoding(BaseModule):\n    \"\"\"Position embedding with learnable embedding weights.\n\n    Args:\n        num_feats (int): The feature dimension for each position\n            along x-axis or y-axis. The final returned dimension for\n            each position is 2 times of this value.\n        row_num_embed (int, optional): The dictionary size of row embeddings.\n            Default 50.\n        col_num_embed (int, optional): The dictionary size of col embeddings.\n            Default 50.\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n    \"\"\"\n\n    def __init__(self,\n                 num_feats,\n                 row_num_embed=50,\n                 col_num_embed=50,\n                 init_cfg=dict(type='Uniform', layer='Embedding')):\n        super(LearnedPositionalEncoding, self).__init__(init_cfg)\n        self.row_embed = nn.Embedding(row_num_embed, num_feats)\n        self.col_embed = nn.Embedding(col_num_embed, num_feats)\n        self.num_feats = num_feats\n        self.row_num_embed = row_num_embed\n        self.col_num_embed = col_num_embed\n\n    def forward(self, mask):\n        \"\"\"Forward function for `LearnedPositionalEncoding`.\n\n        Args:\n            mask (Tensor): ByteTensor mask. Non-zero values representing\n                ignored positions, while zero values means valid positions\n                for this image. 
Shape [bs, h, w].\n\n        Returns:\n            pos (Tensor): Returned position embedding with shape\n                [bs, num_feats*2, h, w].\n        \"\"\"\n        h, w = mask.shape[-2:]\n        x = torch.arange(w, device=mask.device)\n        y = torch.arange(h, device=mask.device)\n        x_embed = self.col_embed(x)\n        y_embed = self.row_embed(y)\n        pos = torch.cat(\n            (x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(\n                1, w, 1)),\n            dim=-1).permute(2, 0,\n                            1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)\n        return pos\n\n    def __repr__(self):\n        \"\"\"str: a string that describes the module\"\"\"\n        repr_str = self.__class__.__name__\n        repr_str += f'(num_feats={self.num_feats}, '\n        repr_str += f'row_num_embed={self.row_num_embed}, '\n        repr_str += f'col_num_embed={self.col_num_embed})'\n        return repr_str\n"
  },
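A shape sketch for `SinePositionalEncoding` (import path assumed). Per the docstring, non-zero mask values mark padded positions, so an all-zero mask means every location is valid.

```python
import torch
from mmdet.models.utils.positional_encoding import SinePositionalEncoding

pos_enc = SinePositionalEncoding(num_feats=128, normalize=True)
mask = torch.zeros(2, 32, 40, dtype=torch.uint8)   # no padded pixels
pos = pos_enc(mask)
assert pos.shape == (2, 256, 32, 40)               # 2 * num_feats channels
```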
  {
    "path": "mmdet/models/utils/res_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\nfrom mmcv.runner import BaseModule, Sequential\nfrom torch import nn as nn\n\n\nclass ResLayer(Sequential):\n    \"\"\"ResLayer to build ResNet style backbone.\n\n    Args:\n        block (nn.Module): block used to build ResLayer.\n        inplanes (int): inplanes of block.\n        planes (int): planes of block.\n        num_blocks (int): number of blocks.\n        stride (int): stride of the first block. Default: 1\n        avg_down (bool): Use AvgPool instead of stride conv when\n            downsampling in the bottleneck. Default: False\n        conv_cfg (dict): dictionary to construct and config conv layer.\n            Default: None\n        norm_cfg (dict): dictionary to construct and config norm layer.\n            Default: dict(type='BN')\n        downsample_first (bool): Downsample at the first block or last block.\n            False for Hourglass, True for ResNet. Default: True\n    \"\"\"\n\n    def __init__(self,\n                 block,\n                 inplanes,\n                 planes,\n                 num_blocks,\n                 stride=1,\n                 avg_down=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 downsample_first=True,\n                 **kwargs):\n        self.block = block\n\n        downsample = None\n        if stride != 1 or inplanes != planes * block.expansion:\n            downsample = []\n            conv_stride = stride\n            if avg_down:\n                conv_stride = 1\n                downsample.append(\n                    nn.AvgPool2d(\n                        kernel_size=stride,\n                        stride=stride,\n                        ceil_mode=True,\n                        count_include_pad=False))\n            downsample.extend([\n                build_conv_layer(\n                    conv_cfg,\n                    inplanes,\n                    planes * block.expansion,\n                    kernel_size=1,\n                    stride=conv_stride,\n                    bias=False),\n                build_norm_layer(norm_cfg, planes * block.expansion)[1]\n            ])\n            downsample = nn.Sequential(*downsample)\n\n        layers = []\n        if downsample_first:\n            layers.append(\n                block(\n                    inplanes=inplanes,\n                    planes=planes,\n                    stride=stride,\n                    downsample=downsample,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n            inplanes = planes * block.expansion\n            for _ in range(1, num_blocks):\n                layers.append(\n                    block(\n                        inplanes=inplanes,\n                        planes=planes,\n                        stride=1,\n                        conv_cfg=conv_cfg,\n                        norm_cfg=norm_cfg,\n                        **kwargs))\n\n        else:  # downsample_first=False is for HourglassModule\n            for _ in range(num_blocks - 1):\n                layers.append(\n                    block(\n                        inplanes=inplanes,\n                        planes=inplanes,\n                        stride=1,\n                        conv_cfg=conv_cfg,\n                        norm_cfg=norm_cfg,\n                        **kwargs))\n            layers.append(\n                block(\n               
     inplanes=inplanes,\n                    planes=planes,\n                    stride=stride,\n                    downsample=downsample,\n                    conv_cfg=conv_cfg,\n                    norm_cfg=norm_cfg,\n                    **kwargs))\n        super(ResLayer, self).__init__(*layers)\n\n\nclass SimplifiedBasicBlock(BaseModule):\n    \"\"\"Simplified version of original basic residual block. This is used in\n    `SCNet <https://arxiv.org/abs/2012.10150>`_.\n\n    - Norm layer is now optional\n    - Last ReLU in forward function is removed\n    \"\"\"\n    expansion = 1\n\n    def __init__(self,\n                 inplanes,\n                 planes,\n                 stride=1,\n                 dilation=1,\n                 downsample=None,\n                 style='pytorch',\n                 with_cp=False,\n                 conv_cfg=None,\n                 norm_cfg=dict(type='BN'),\n                 dcn=None,\n                 plugins=None,\n                 init_fg=None):\n        super(SimplifiedBasicBlock, self).__init__(init_fg)\n        assert dcn is None, 'Not implemented yet.'\n        assert plugins is None, 'Not implemented yet.'\n        assert not with_cp, 'Not implemented yet.'\n        self.with_norm = norm_cfg is not None\n        with_bias = True if norm_cfg is None else False\n        self.conv1 = build_conv_layer(\n            conv_cfg,\n            inplanes,\n            planes,\n            3,\n            stride=stride,\n            padding=dilation,\n            dilation=dilation,\n            bias=with_bias)\n        if self.with_norm:\n            self.norm1_name, norm1 = build_norm_layer(\n                norm_cfg, planes, postfix=1)\n            self.add_module(self.norm1_name, norm1)\n        self.conv2 = build_conv_layer(\n            conv_cfg, planes, planes, 3, padding=1, bias=with_bias)\n        if self.with_norm:\n            self.norm2_name, norm2 = build_norm_layer(\n                norm_cfg, planes, postfix=2)\n            self.add_module(self.norm2_name, norm2)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n        self.stride = stride\n        self.dilation = dilation\n        self.with_cp = with_cp\n\n    @property\n    def norm1(self):\n        \"\"\"nn.Module: normalization layer after the first convolution layer\"\"\"\n        return getattr(self, self.norm1_name) if self.with_norm else None\n\n    @property\n    def norm2(self):\n        \"\"\"nn.Module: normalization layer after the second convolution layer\"\"\"\n        return getattr(self, self.norm2_name) if self.with_norm else None\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n\n        identity = x\n\n        out = self.conv1(x)\n        if self.with_norm:\n            out = self.norm1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        if self.with_norm:\n            out = self.norm2(out)\n\n        if self.downsample is not None:\n            identity = self.downsample(x)\n\n        out += identity\n\n        return out\n"
  },
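A construction sketch for `ResLayer`, assuming the standard mmdet `BasicBlock` (expansion 1) is importable from `mmdet.models.backbones.resnet`; the channel and stride values are illustrative.

```python
import torch
from mmdet.models.backbones.resnet import BasicBlock
from mmdet.models.utils.res_layer import ResLayer

layer = ResLayer(BasicBlock, inplanes=64, planes=128, num_blocks=2, stride=2)
x = torch.rand(1, 64, 56, 56)
out = layer(x)
assert out.shape == (1, 128, 28, 28)   # stride 2 halves the spatial size
```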
  {
    "path": "mmdet/models/utils/se_layer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.runner import BaseModule\n\n\nclass SELayer(BaseModule):\n    \"\"\"Squeeze-and-Excitation Module.\n\n    Args:\n        channels (int): The input (and output) channels of the SE layer.\n        ratio (int): Squeeze ratio in SELayer, the intermediate channel will be\n            ``int(channels/ratio)``. Default: 16.\n        conv_cfg (None or dict): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        act_cfg (dict or Sequence[dict]): Config dict for activation layer.\n            If act_cfg is a dict, two activation layers will be configurated\n            by this dict. If act_cfg is a sequence of dicts, the first\n            activation layer will be configurated by the first dict and the\n            second activation layer will be configurated by the second dict.\n            Default: (dict(type='ReLU'), dict(type='Sigmoid'))\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 channels,\n                 ratio=16,\n                 conv_cfg=None,\n                 act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),\n                 init_cfg=None):\n        super(SELayer, self).__init__(init_cfg)\n        if isinstance(act_cfg, dict):\n            act_cfg = (act_cfg, act_cfg)\n        assert len(act_cfg) == 2\n        assert mmcv.is_tuple_of(act_cfg, dict)\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = ConvModule(\n            in_channels=channels,\n            out_channels=int(channels / ratio),\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[0])\n        self.conv2 = ConvModule(\n            in_channels=int(channels / ratio),\n            out_channels=channels,\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[1])\n\n    def forward(self, x):\n        out = self.global_avgpool(x)\n        out = self.conv1(out)\n        out = self.conv2(out)\n        return x * out\n\n\nclass DyReLU(BaseModule):\n    \"\"\"Dynamic ReLU (DyReLU) module.\n\n    See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.\n    Current implementation is specialized for task-aware attention in DyHead.\n    HSigmoid arguments in default act_cfg follow DyHead official code.\n    https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py\n\n    Args:\n        channels (int): The input (and output) channels of DyReLU module.\n        ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,\n            the intermediate channel will be ``int(channels/ratio)``.\n            Default: 4.\n        conv_cfg (None or dict): Config dict for convolution layer.\n            Default: None, which means using conv2d.\n        act_cfg (dict or Sequence[dict]): Config dict for activation layer.\n            If act_cfg is a dict, two activation layers will be configurated\n            by this dict. 
If act_cfg is a sequence of dicts, the first\n            activation layer will be configurated by the first dict and the\n            second activation layer will be configurated by the second dict.\n            Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,\n            divisor=6.0))\n        init_cfg (dict or list[dict], optional): Initialization config dict.\n            Default: None\n    \"\"\"\n\n    def __init__(self,\n                 channels,\n                 ratio=4,\n                 conv_cfg=None,\n                 act_cfg=(dict(type='ReLU'),\n                          dict(type='HSigmoid', bias=3.0, divisor=6.0)),\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n        if isinstance(act_cfg, dict):\n            act_cfg = (act_cfg, act_cfg)\n        assert len(act_cfg) == 2\n        assert mmcv.is_tuple_of(act_cfg, dict)\n        self.channels = channels\n        self.expansion = 4  # for a1, b1, a2, b2\n        self.global_avgpool = nn.AdaptiveAvgPool2d(1)\n        self.conv1 = ConvModule(\n            in_channels=channels,\n            out_channels=int(channels / ratio),\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[0])\n        self.conv2 = ConvModule(\n            in_channels=int(channels / ratio),\n            out_channels=channels * self.expansion,\n            kernel_size=1,\n            stride=1,\n            conv_cfg=conv_cfg,\n            act_cfg=act_cfg[1])\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        coeffs = self.global_avgpool(x)\n        coeffs = self.conv1(coeffs)\n        coeffs = self.conv2(coeffs) - 0.5  # value range: [-0.5, 0.5]\n        a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)\n        a1 = a1 * 2.0 + 1.0  # [-1.0, 1.0] + 1.0\n        a2 = a2 * 2.0  # [-1.0, 1.0]\n        out = torch.max(x * a1 + b1, x * a2 + b2)\n        return out\n"
  },
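A shape sketch for the two attention modules above (import path assumed); both preserve the input shape and only reweight it.

```python
import torch
from mmdet.models.utils.se_layer import DyReLU, SELayer

x = torch.rand(2, 64, 14, 14)
assert SELayer(channels=64, ratio=16)(x).shape == x.shape  # channel reweighting
assert DyReLU(channels=64)(x).shape == x.shape             # max of two affine maps
```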
  {
    "path": "mmdet/models/utils/transformer.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\nfrom typing import Sequence\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn import (build_activation_layer, build_conv_layer,\n                      build_norm_layer, xavier_init)\nfrom mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER,\n                                      TRANSFORMER_LAYER_SEQUENCE)\nfrom mmcv.cnn.bricks.transformer import (BaseTransformerLayer,\n                                         TransformerLayerSequence,\n                                         build_transformer_layer_sequence)\nfrom mmcv.runner.base_module import BaseModule\nfrom mmcv.utils import to_2tuple\nfrom torch.nn.init import normal_\n\nfrom mmdet.models.utils.builder import TRANSFORMER\n\ntry:\n    from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention\n\nexcept ImportError:\n    warnings.warn(\n        '`MultiScaleDeformableAttention` in MMCV has been moved to '\n        '`mmcv.ops.multi_scale_deform_attn`, please update your MMCV')\n    from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention\n\n\ndef nlc_to_nchw(x, hw_shape):\n    \"\"\"Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.\n\n    Args:\n        x (Tensor): The input tensor of shape [N, L, C] before conversion.\n        hw_shape (Sequence[int]): The height and width of output feature map.\n\n    Returns:\n        Tensor: The output tensor of shape [N, C, H, W] after conversion.\n    \"\"\"\n    H, W = hw_shape\n    assert len(x.shape) == 3\n    B, L, C = x.shape\n    assert L == H * W, 'The seq_len does not match H, W'\n    return x.transpose(1, 2).reshape(B, C, H, W).contiguous()\n\n\ndef nchw_to_nlc(x):\n    \"\"\"Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.\n\n    Args:\n        x (Tensor): The input tensor of shape [N, C, H, W] before conversion.\n\n    Returns:\n        Tensor: The output tensor of shape [N, L, C] after conversion.\n    \"\"\"\n    assert len(x.shape) == 4\n    return x.flatten(2).transpose(1, 2).contiguous()\n\n\nclass AdaptivePadding(nn.Module):\n    \"\"\"Applies padding to input (if needed) so that input can get fully covered\n    by filter you specified. It support two modes \"same\" and \"corner\". The\n    \"same\" mode is same with \"SAME\" padding mode in TensorFlow, pad zero around\n    input. The \"corner\"  mode would pad zero to bottom right.\n\n    Args:\n        kernel_size (int | tuple): Size of the kernel:\n        stride (int | tuple): Stride of the filter. Default: 1:\n        dilation (int | tuple): Spacing between kernel elements.\n            Default: 1\n        padding (str): Support \"same\" and \"corner\", \"corner\" mode\n            would pad zero to bottom right, and \"same\" mode would\n            pad zero around input. 
Default: \"corner\".\n    Example:\n        >>> kernel_size = 16\n        >>> stride = 16\n        >>> dilation = 1\n        >>> input = torch.rand(1, 1, 15, 17)\n        >>> adap_pad = AdaptivePadding(\n        >>>     kernel_size=kernel_size,\n        >>>     stride=stride,\n        >>>     dilation=dilation,\n        >>>     padding=\"corner\")\n        >>> out = adap_pad(input)\n        >>> assert (out.shape[2], out.shape[3]) == (16, 32)\n        >>> input = torch.rand(1, 1, 16, 17)\n        >>> out = adap_pad(input)\n        >>> assert (out.shape[2], out.shape[3]) == (16, 32)\n    \"\"\"\n\n    def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):\n\n        super(AdaptivePadding, self).__init__()\n\n        assert padding in ('same', 'corner')\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        padding = to_2tuple(padding)\n        dilation = to_2tuple(dilation)\n\n        self.padding = padding\n        self.kernel_size = kernel_size\n        self.stride = stride\n        self.dilation = dilation\n\n    def get_pad_shape(self, input_shape):\n        input_h, input_w = input_shape\n        kernel_h, kernel_w = self.kernel_size\n        stride_h, stride_w = self.stride\n        output_h = math.ceil(input_h / stride_h)\n        output_w = math.ceil(input_w / stride_w)\n        pad_h = max((output_h - 1) * stride_h +\n                    (kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)\n        pad_w = max((output_w - 1) * stride_w +\n                    (kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)\n        return pad_h, pad_w\n\n    def forward(self, x):\n        pad_h, pad_w = self.get_pad_shape(x.size()[-2:])\n        if pad_h > 0 or pad_w > 0:\n            if self.padding == 'corner':\n                x = F.pad(x, [0, pad_w, 0, pad_h])\n            elif self.padding == 'same':\n                x = F.pad(x, [\n                    pad_w // 2, pad_w - pad_w // 2, pad_h // 2,\n                    pad_h - pad_h // 2\n                ])\n        return x\n\n\nclass PatchEmbed(BaseModule):\n    \"\"\"Image to Patch Embedding.\n\n    We use a conv layer to implement PatchEmbed.\n\n    Args:\n        in_channels (int): The num of input channels. Default: 3\n        embed_dims (int): The dimensions of embedding. Default: 768\n        conv_type (str): The config dict for embedding\n            conv layer type selection. Default: \"Conv2d.\n        kernel_size (int): The kernel_size of embedding conv. Default: 16.\n        stride (int): The slide stride of embedding conv.\n            Default: None (Would be set as `kernel_size`).\n        padding (int | tuple | string ): The padding length of\n            embedding conv. When it is a string, it means the mode\n            of adaptive padding, support \"same\" and \"corner\" now.\n            Default: \"corner\".\n        dilation (int): The dilation rate of embedding conv. Default: 1.\n        bias (bool): Bias of embed conv. Default: True.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: None.\n        input_size (int | tuple | None): The size of input, which will be\n            used to calculate the out size. Only work when `dynamic_size`\n            is False. 
Default: None.\n        init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(\n        self,\n        in_channels=3,\n        embed_dims=768,\n        conv_type='Conv2d',\n        kernel_size=16,\n        stride=16,\n        padding='corner',\n        dilation=1,\n        bias=True,\n        norm_cfg=None,\n        input_size=None,\n        init_cfg=None,\n    ):\n        super(PatchEmbed, self).__init__(init_cfg=init_cfg)\n\n        self.embed_dims = embed_dims\n        if stride is None:\n            stride = kernel_size\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        dilation = to_2tuple(dilation)\n\n        if isinstance(padding, str):\n            self.adap_padding = AdaptivePadding(\n                kernel_size=kernel_size,\n                stride=stride,\n                dilation=dilation,\n                padding=padding)\n            # disable the padding of conv\n            padding = 0\n        else:\n            self.adap_padding = None\n        padding = to_2tuple(padding)\n\n        self.projection = build_conv_layer(\n            dict(type=conv_type),\n            in_channels=in_channels,\n            out_channels=embed_dims,\n            kernel_size=kernel_size,\n            stride=stride,\n            padding=padding,\n            dilation=dilation,\n            bias=bias)\n\n        if norm_cfg is not None:\n            self.norm = build_norm_layer(norm_cfg, embed_dims)[1]\n        else:\n            self.norm = None\n\n        if input_size:\n            input_size = to_2tuple(input_size)\n            # `init_out_size` would be used outside to\n            # calculate the num_patches\n            # when `use_abs_pos_embed` outside\n            self.init_input_size = input_size\n            if self.adap_padding:\n                pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)\n                input_h, input_w = input_size\n                input_h = input_h + pad_h\n                input_w = input_w + pad_w\n                input_size = (input_h, input_w)\n\n            # https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html\n            h_out = (input_size[0] + 2 * padding[0] - dilation[0] *\n                     (kernel_size[0] - 1) - 1) // stride[0] + 1\n            w_out = (input_size[1] + 2 * padding[1] - dilation[1] *\n                     (kernel_size[1] - 1) - 1) // stride[1] + 1\n            self.init_out_size = (h_out, w_out)\n        else:\n            self.init_input_size = None\n            self.init_out_size = None\n\n    def forward(self, x):\n        \"\"\"\n        Args:\n            x (Tensor): Has shape (B, C, H, W). In most case, C is 3.\n\n        Returns:\n            tuple: Contains merged results and its spatial shape.\n\n                - x (Tensor): Has shape (B, out_h * out_w, embed_dims)\n                - out_size (tuple[int]): Spatial shape of x, arrange as\n                    (out_h, out_w).\n        \"\"\"\n\n        if self.adap_padding:\n            x = self.adap_padding(x)\n\n        x = self.projection(x)\n        out_size = (x.shape[2], x.shape[3])\n        x = x.flatten(2).transpose(1, 2)\n        if self.norm is not None:\n            x = self.norm(x)\n        return x, out_size\n\n\nclass PatchMerging(BaseModule):\n    \"\"\"Merge patch feature map.\n\n    This layer groups feature map by kernel_size, and applies norm and linear\n    layers to the grouped feature map. 
Our implementation uses `nn.Unfold` to\n    merge patch, which is about 25% faster than original implementation.\n    Instead, we need to modify pretrained models for compatibility.\n\n    Args:\n        in_channels (int): The num of input channels.\n            to gets fully covered by filter and stride you specified..\n            Default: True.\n        out_channels (int): The num of output channels.\n        kernel_size (int | tuple, optional): the kernel size in the unfold\n            layer. Defaults to 2.\n        stride (int | tuple, optional): the stride of the sliding blocks in the\n            unfold layer. Default: None. (Would be set as `kernel_size`)\n        padding (int | tuple | string ): The padding length of\n            embedding conv. When it is a string, it means the mode\n            of adaptive padding, support \"same\" and \"corner\" now.\n            Default: \"corner\".\n        dilation (int | tuple, optional): dilation parameter in the unfold\n            layer. Default: 1.\n        bias (bool, optional): Whether to add bias in linear layer or not.\n            Defaults: False.\n        norm_cfg (dict, optional): Config dict for normalization layer.\n            Default: dict(type='LN').\n        init_cfg (dict, optional): The extra config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels,\n                 out_channels,\n                 kernel_size=2,\n                 stride=None,\n                 padding='corner',\n                 dilation=1,\n                 bias=False,\n                 norm_cfg=dict(type='LN'),\n                 init_cfg=None):\n        super().__init__(init_cfg=init_cfg)\n        self.in_channels = in_channels\n        self.out_channels = out_channels\n        if stride:\n            stride = stride\n        else:\n            stride = kernel_size\n\n        kernel_size = to_2tuple(kernel_size)\n        stride = to_2tuple(stride)\n        dilation = to_2tuple(dilation)\n\n        if isinstance(padding, str):\n            self.adap_padding = AdaptivePadding(\n                kernel_size=kernel_size,\n                stride=stride,\n                dilation=dilation,\n                padding=padding)\n            # disable the padding of unfold\n            padding = 0\n        else:\n            self.adap_padding = None\n\n        padding = to_2tuple(padding)\n        self.sampler = nn.Unfold(\n            kernel_size=kernel_size,\n            dilation=dilation,\n            padding=padding,\n            stride=stride)\n\n        sample_dim = kernel_size[0] * kernel_size[1] * in_channels\n\n        if norm_cfg is not None:\n            self.norm = build_norm_layer(norm_cfg, sample_dim)[1]\n        else:\n            self.norm = None\n\n        self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)\n\n    def forward(self, x, input_size):\n        \"\"\"\n        Args:\n            x (Tensor): Has shape (B, H*W, C_in).\n            input_size (tuple[int]): The spatial shape of x, arrange as (H, W).\n                Default: None.\n\n        Returns:\n            tuple: Contains merged results and its spatial shape.\n\n                - x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)\n                - out_size (tuple[int]): Spatial shape of x, arrange as\n                    (Merged_H, Merged_W).\n        \"\"\"\n        B, L, C = x.shape\n        assert isinstance(input_size, Sequence), f'Expect ' \\\n                                                 
f'input_size is ' \\\n                                                 f'`Sequence` ' \\\n                                                 f'but get {input_size}'\n\n        H, W = input_size\n        assert L == H * W, 'input feature has wrong size'\n\n        x = x.view(B, H, W, C).permute([0, 3, 1, 2])  # B, C, H, W\n        # Use nn.Unfold to merge patch. About 25% faster than original method,\n        # but need to modify pretrained model for compatibility\n\n        if self.adap_padding:\n            x = self.adap_padding(x)\n            H, W = x.shape[-2:]\n\n        x = self.sampler(x)\n        # if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2)\n\n        out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *\n                 (self.sampler.kernel_size[0] - 1) -\n                 1) // self.sampler.stride[0] + 1\n        out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *\n                 (self.sampler.kernel_size[1] - 1) -\n                 1) // self.sampler.stride[1] + 1\n\n        output_size = (out_h, out_w)\n        x = x.transpose(1, 2)  # B, H/2*W/2, 4*C\n        x = self.norm(x) if self.norm else x\n        x = self.reduction(x)\n        return x, output_size\n\n\ndef inverse_sigmoid(x, eps=1e-5):\n    \"\"\"Inverse function of sigmoid.\n\n    Args:\n        x (Tensor): The tensor to do the\n            inverse.\n        eps (float): EPS avoid numerical\n            overflow. Defaults 1e-5.\n    Returns:\n        Tensor: The x has passed the inverse\n            function of sigmoid, has same\n            shape with input.\n    \"\"\"\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1 / x2)\n\n\n@TRANSFORMER_LAYER.register_module()\nclass DetrTransformerDecoderLayer(BaseTransformerLayer):\n    \"\"\"Implements decoder layer in DETR transformer.\n\n    Args:\n        attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )):\n            Configs for self_attention or cross_attention, the order\n            should be consistent with it in `operation_order`. If it is\n            a dict, it would be expand to the number of attention in\n            `operation_order`.\n        feedforward_channels (int): The hidden dimension for FFNs.\n        ffn_dropout (float): Probability of an element to be zeroed\n            in ffn. Default 0.0.\n        operation_order (tuple[str]): The execution order of operation\n            in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').\n            Default：None\n        act_cfg (dict): The activation config for FFNs. 
Default: `LN`\n        norm_cfg (dict): Config dict for normalization layer.\n            Default: `LN`.\n        ffn_num_fcs (int): The number of fully-connected layers in FFNs.\n            Default：2.\n    \"\"\"\n\n    def __init__(self,\n                 attn_cfgs,\n                 feedforward_channels,\n                 ffn_dropout=0.0,\n                 operation_order=None,\n                 act_cfg=dict(type='ReLU', inplace=True),\n                 norm_cfg=dict(type='LN'),\n                 ffn_num_fcs=2,\n                 **kwargs):\n        super(DetrTransformerDecoderLayer, self).__init__(\n            attn_cfgs=attn_cfgs,\n            feedforward_channels=feedforward_channels,\n            ffn_dropout=ffn_dropout,\n            operation_order=operation_order,\n            act_cfg=act_cfg,\n            norm_cfg=norm_cfg,\n            ffn_num_fcs=ffn_num_fcs,\n            **kwargs)\n        assert len(operation_order) == 6\n        assert set(operation_order) == set(\n            ['self_attn', 'norm', 'cross_attn', 'ffn'])\n\n\n@TRANSFORMER_LAYER_SEQUENCE.register_module()\nclass DetrTransformerEncoder(TransformerLayerSequence):\n    \"\"\"TransformerEncoder of DETR.\n\n    Args:\n        post_norm_cfg (dict): Config of last normalization layer. Default：\n            `LN`. Only used when `self.pre_norm` is `True`\n    \"\"\"\n\n    def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs):\n        super(DetrTransformerEncoder, self).__init__(*args, **kwargs)\n        if post_norm_cfg is not None:\n            self.post_norm = build_norm_layer(\n                post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None\n        else:\n            assert not self.pre_norm, f'Use prenorm in ' \\\n                                      f'{self.__class__.__name__},' \\\n                                      f'Please specify post_norm_cfg'\n            self.post_norm = None\n\n    def forward(self, *args, **kwargs):\n        \"\"\"Forward function for `TransformerCoder`.\n\n        Returns:\n            Tensor: forwarded results with shape [num_query, bs, embed_dims].\n        \"\"\"\n        x = super(DetrTransformerEncoder, self).forward(*args, **kwargs)\n        if self.post_norm is not None:\n            x = self.post_norm(x)\n        return x\n\n\n@TRANSFORMER_LAYER_SEQUENCE.register_module()\nclass DetrTransformerDecoder(TransformerLayerSequence):\n    \"\"\"Implements the decoder in DETR transformer.\n\n    Args:\n        return_intermediate (bool): Whether to return intermediate outputs.\n        post_norm_cfg (dict): Config of last normalization layer. 
Default：\n            `LN`.\n    \"\"\"\n\n    def __init__(self,\n                 *args,\n                 post_norm_cfg=dict(type='LN'),\n                 return_intermediate=False,\n                 **kwargs):\n\n        super(DetrTransformerDecoder, self).__init__(*args, **kwargs)\n        self.return_intermediate = return_intermediate\n        if post_norm_cfg is not None:\n            self.post_norm = build_norm_layer(post_norm_cfg,\n                                              self.embed_dims)[1]\n        else:\n            self.post_norm = None\n\n    def forward(self, query, *args, **kwargs):\n        \"\"\"Forward function for `TransformerDecoder`.\n\n        Args:\n            query (Tensor): Input query with shape\n                `(num_query, bs, embed_dims)`.\n\n        Returns:\n            Tensor: Results with shape [1, num_query, bs, embed_dims] when\n                return_intermediate is `False`, otherwise it has shape\n                [num_layers, num_query, bs, embed_dims].\n        \"\"\"\n        if not self.return_intermediate:\n            x = super().forward(query, *args, **kwargs)\n            if self.post_norm:\n                x = self.post_norm(x)[None]\n            return x\n\n        intermediate = []\n        for layer in self.layers:\n            query = layer(query, *args, **kwargs)\n            if self.return_intermediate:\n                if self.post_norm is not None:\n                    intermediate.append(self.post_norm(query))\n                else:\n                    intermediate.append(query)\n        return torch.stack(intermediate)\n\n\n@TRANSFORMER.register_module()\nclass Transformer(BaseModule):\n    \"\"\"Implements the DETR transformer.\n\n    Following the official DETR implementation, this module copy-paste\n    from torch.nn.Transformer with modifications:\n\n        * positional encodings are passed in MultiheadAttention\n        * extra LN at the end of encoder is removed\n        * decoder returns a stack of activations from all decoding layers\n\n    See `paper: End-to-End Object Detection with Transformers\n    <https://arxiv.org/pdf/2005.12872>`_ for details.\n\n    Args:\n        encoder (`mmcv.ConfigDict` | Dict): Config of\n            TransformerEncoder. Defaults to None.\n        decoder ((`mmcv.ConfigDict` | Dict)): Config of\n            TransformerDecoder. 
Defaults to None\n        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n            Defaults to None.\n    \"\"\"\n\n    def __init__(self, encoder=None, decoder=None, init_cfg=None):\n        super(Transformer, self).__init__(init_cfg=init_cfg)\n        self.encoder = build_transformer_layer_sequence(encoder)\n        self.decoder = build_transformer_layer_sequence(decoder)\n        self.embed_dims = self.encoder.embed_dims\n\n    def init_weights(self):\n        # follow the official DETR to init parameters\n        for m in self.modules():\n            if hasattr(m, 'weight') and m.weight.dim() > 1:\n                xavier_init(m, distribution='uniform')\n        self._is_init = True\n\n    def forward(self, x, mask, query_embed, pos_embed):\n        \"\"\"Forward function for `Transformer`.\n\n        Args:\n            x (Tensor): Input query with shape [bs, c, h, w] where\n                c = embed_dims.\n            mask (Tensor): The key_padding_mask used for encoder and decoder,\n                with shape [bs, h, w].\n            query_embed (Tensor): The query embedding for decoder, with shape\n                [num_query, c].\n            pos_embed (Tensor): The positional encoding for encoder and\n                decoder, with the same shape as `x`.\n\n        Returns:\n            tuple[Tensor]: results of decoder containing the following tensor.\n\n                - out_dec: Output from decoder. If return_intermediate_dec \\\n                      is True output has shape [num_dec_layers, bs,\n                      num_query, embed_dims], else has shape [1, bs, \\\n                      num_query, embed_dims].\n                - memory: Output results from encoder, with shape \\\n                      [bs, embed_dims, h, w].\n        \"\"\"\n        bs, c, h, w = x.shape\n        # use `view` instead of `flatten` for dynamically exporting to ONNX\n        x = x.view(bs, c, -1).permute(2, 0, 1)  # [bs, c, h, w] -> [h*w, bs, c]\n        pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)\n        query_embed = query_embed.unsqueeze(1).repeat(\n            1, bs, 1)  # [num_query, dim] -> [num_query, bs, dim]\n        mask = mask.view(bs, -1)  # [bs, h, w] -> [bs, h*w]\n        memory = self.encoder(\n            query=x,\n            key=None,\n            value=None,\n            query_pos=pos_embed,\n            query_key_padding_mask=mask)\n        target = torch.zeros_like(query_embed)\n        # out_dec: [num_layers, num_query, bs, dim]\n        out_dec = self.decoder(\n            query=target,\n            key=memory,\n            value=memory,\n            key_pos=pos_embed,\n            query_pos=query_embed,\n            key_padding_mask=mask)\n        out_dec = out_dec.transpose(1, 2)\n        memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)\n        return out_dec, memory\n\n\n@TRANSFORMER_LAYER_SEQUENCE.register_module()\nclass DeformableDetrTransformerDecoder(TransformerLayerSequence):\n    \"\"\"Implements the decoder in DETR transformer.\n\n    Args:\n        return_intermediate (bool): Whether to return intermediate outputs.\n        coder_norm_cfg (dict): Config of last normalization layer. 
Default：\n            `LN`.\n    \"\"\"\n\n    def __init__(self, *args, return_intermediate=False, **kwargs):\n\n        super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs)\n        self.return_intermediate = return_intermediate\n\n    def forward(self,\n                query,\n                *args,\n                reference_points=None,\n                valid_ratios=None,\n                reg_branches=None,\n                **kwargs):\n        \"\"\"Forward function for `TransformerDecoder`.\n\n        Args:\n            query (Tensor): Input query with shape\n                `(num_query, bs, embed_dims)`.\n            reference_points (Tensor): The reference\n                points of offset. has shape\n                (bs, num_query, 4) when as_two_stage,\n                otherwise has shape ((bs, num_query, 2).\n            valid_ratios (Tensor): The radios of valid\n                points on the feature map, has shape\n                (bs, num_levels, 2)\n            reg_branch: (obj:`nn.ModuleList`): Used for\n                refining the regression results. Only would\n                be passed when with_box_refine is True,\n                otherwise would be passed a `None`.\n\n        Returns:\n            Tensor: Results with shape [1, num_query, bs, embed_dims] when\n                return_intermediate is `False`, otherwise it has shape\n                [num_layers, num_query, bs, embed_dims].\n        \"\"\"\n        output = query\n        intermediate = []\n        intermediate_reference_points = []\n        for lid, layer in enumerate(self.layers):\n            if reference_points.shape[-1] == 4:\n                reference_points_input = reference_points[:, :, None] * \\\n                    torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n            else:\n                assert reference_points.shape[-1] == 2\n                reference_points_input = reference_points[:, :, None] * \\\n                    valid_ratios[:, None]\n            output = layer(\n                output,\n                *args,\n                reference_points=reference_points_input,\n                **kwargs)\n            output = output.permute(1, 0, 2)\n\n            if reg_branches is not None:\n                tmp = reg_branches[lid](output)\n                if reference_points.shape[-1] == 4:\n                    new_reference_points = tmp + inverse_sigmoid(\n                        reference_points)\n                    new_reference_points = new_reference_points.sigmoid()\n                else:\n                    assert reference_points.shape[-1] == 2\n                    new_reference_points = tmp\n                    new_reference_points[..., :2] = tmp[\n                        ..., :2] + inverse_sigmoid(reference_points)\n                    new_reference_points = new_reference_points.sigmoid()\n                reference_points = new_reference_points.detach()\n\n            output = output.permute(1, 0, 2)\n            if self.return_intermediate:\n                intermediate.append(output)\n                intermediate_reference_points.append(reference_points)\n\n        if self.return_intermediate:\n            return torch.stack(intermediate), torch.stack(\n                intermediate_reference_points)\n\n        return output, reference_points\n\n\n@TRANSFORMER.register_module()\nclass DeformableDetrTransformer(Transformer):\n    \"\"\"Implements the DeformableDETR transformer.\n\n    Args:\n        as_two_stage (bool): Generate query from encoder 
features.\n            Default: False.\n        num_feature_levels (int): Number of feature maps from FPN:\n            Default: 4.\n        two_stage_num_proposals (int): Number of proposals when set\n            `as_two_stage` as True. Default: 300.\n    \"\"\"\n\n    def __init__(self,\n                 as_two_stage=False,\n                 num_feature_levels=4,\n                 two_stage_num_proposals=300,\n                 **kwargs):\n        super(DeformableDetrTransformer, self).__init__(**kwargs)\n        self.as_two_stage = as_two_stage\n        self.num_feature_levels = num_feature_levels\n        self.two_stage_num_proposals = two_stage_num_proposals\n        self.embed_dims = self.encoder.embed_dims\n        self.init_layers()\n\n    def init_layers(self):\n        \"\"\"Initialize layers of the DeformableDetrTransformer.\"\"\"\n        self.level_embeds = nn.Parameter(\n            torch.Tensor(self.num_feature_levels, self.embed_dims))\n\n        if self.as_two_stage:\n            self.enc_output = nn.Linear(self.embed_dims, self.embed_dims)\n            self.enc_output_norm = nn.LayerNorm(self.embed_dims)\n            self.pos_trans = nn.Linear(self.embed_dims * 2,\n                                       self.embed_dims * 2)\n            self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2)\n        else:\n            self.reference_points = nn.Linear(self.embed_dims, 2)\n\n    def init_weights(self):\n        \"\"\"Initialize the transformer weights.\"\"\"\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MultiScaleDeformableAttention):\n                m.init_weights()\n        if not self.as_two_stage:\n            xavier_init(self.reference_points, distribution='uniform', bias=0.)\n        normal_(self.level_embeds)\n\n    def gen_encoder_output_proposals(self, memory, memory_padding_mask,\n                                     spatial_shapes):\n        \"\"\"Generate proposals from encoded memory.\n\n        Args:\n            memory (Tensor) : The output of encoder,\n                has shape (bs, num_key, embed_dim).  num_key is\n                equal the number of points on feature map from\n                all level.\n            memory_padding_mask (Tensor): Padding mask for memory.\n                has shape (bs, num_key).\n            spatial_shapes (Tensor): The shape of all feature maps.\n                has shape (num_level, 2).\n\n        Returns:\n            tuple: A tuple of feature map and bbox prediction.\n\n                - output_memory (Tensor): The input of decoder,  \\\n                    has shape (bs, num_key, embed_dim).  
num_key is \\\n                    equal the number of points on feature map from \\\n                    all levels.\n                - output_proposals (Tensor): The normalized proposal \\\n                    after a inverse sigmoid, has shape \\\n                    (bs, num_keys, 4).\n        \"\"\"\n\n        N, S, C = memory.shape\n        proposals = []\n        _cur = 0\n        for lvl, (H, W) in enumerate(spatial_shapes):\n            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(\n                N, H, W, 1)\n            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n            grid_y, grid_x = torch.meshgrid(\n                torch.linspace(\n                    0, H - 1, H, dtype=torch.float32, device=memory.device),\n                torch.linspace(\n                    0, W - 1, W, dtype=torch.float32, device=memory.device))\n            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n            scale = torch.cat([valid_W.unsqueeze(-1),\n                               valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n            grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n            wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n            proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n            proposals.append(proposal)\n            _cur += (H * W)\n        output_proposals = torch.cat(proposals, 1)\n        output_proposals_valid = ((output_proposals > 0.01) &\n                                  (output_proposals < 0.99)).all(\n                                      -1, keepdim=True)\n        output_proposals = torch.log(output_proposals / (1 - output_proposals))\n        output_proposals = output_proposals.masked_fill(\n            memory_padding_mask.unsqueeze(-1), float('inf'))\n        output_proposals = output_proposals.masked_fill(\n            ~output_proposals_valid, float('inf'))\n\n        output_memory = memory\n        output_memory = output_memory.masked_fill(\n            memory_padding_mask.unsqueeze(-1), float(0))\n        output_memory = output_memory.masked_fill(~output_proposals_valid,\n                                                  float(0))\n        output_memory = self.enc_output_norm(self.enc_output(output_memory))\n        return output_memory, output_proposals\n\n    @staticmethod\n    def get_reference_points(spatial_shapes, valid_ratios, device):\n        \"\"\"Get the reference points used in decoder.\n\n        Args:\n            spatial_shapes (Tensor): The shape of all\n                feature maps, has shape (num_level, 2).\n            valid_ratios (Tensor): The radios of valid\n                points on the feature map, has shape\n                (bs, num_levels, 2)\n            device (obj:`device`): The device where\n                reference_points should be.\n\n        Returns:\n            Tensor: reference points used in decoder, has \\\n                shape (bs, num_keys, num_levels, 2).\n        \"\"\"\n        reference_points_list = []\n        for lvl, (H, W) in enumerate(spatial_shapes):\n            #  TODO  check this 0.5\n            ref_y, ref_x = torch.meshgrid(\n                torch.linspace(\n                    0.5, H - 0.5, H, dtype=torch.float32, device=device),\n                torch.linspace(\n                    0.5, W - 0.5, W, dtype=torch.float32, device=device))\n            ref_y = ref_y.reshape(-1)[None] / (\n                valid_ratios[:, None, lvl, 1] * H)\n         
   ref_x = ref_x.reshape(-1)[None] / (\n                valid_ratios[:, None, lvl, 0] * W)\n            ref = torch.stack((ref_x, ref_y), -1)\n            reference_points_list.append(ref)\n        reference_points = torch.cat(reference_points_list, 1)\n        reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n        return reference_points\n\n    def get_valid_ratio(self, mask):\n        \"\"\"Get the valid radios of feature maps of all  level.\"\"\"\n        _, H, W = mask.shape\n        valid_H = torch.sum(~mask[:, :, 0], 1)\n        valid_W = torch.sum(~mask[:, 0, :], 1)\n        valid_ratio_h = valid_H.float() / H\n        valid_ratio_w = valid_W.float() / W\n        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n        return valid_ratio\n\n    def get_proposal_pos_embed(self,\n                               proposals,\n                               num_pos_feats=128,\n                               temperature=10000):\n        \"\"\"Get the position embedding of proposal.\"\"\"\n        scale = 2 * math.pi\n        dim_t = torch.arange(\n            num_pos_feats, dtype=torch.float32, device=proposals.device)\n        dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)\n        # N, L, 4\n        proposals = proposals.sigmoid() * scale\n        # N, L, 4, 128\n        pos = proposals[:, :, :, None] / dim_t\n        # N, L, 4, 64, 2\n        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),\n                          dim=4).flatten(2)\n        return pos\n\n    def forward(self,\n                mlvl_feats,\n                mlvl_masks,\n                query_embed,\n                mlvl_pos_embeds,\n                reg_branches=None,\n                cls_branches=None,\n                **kwargs):\n        \"\"\"Forward function for `Transformer`.\n\n        Args:\n            mlvl_feats (list(Tensor)): Input queries from\n                different level. Each element has shape\n                [bs, embed_dims, h, w].\n            mlvl_masks (list(Tensor)): The key_padding_mask from\n                different level used for encoder and decoder,\n                each element has shape  [bs, h, w].\n            query_embed (Tensor): The query embedding for decoder,\n                with shape [num_query, c].\n            mlvl_pos_embeds (list(Tensor)): The positional encoding\n                of feats from different level, has the shape\n                 [bs, embed_dims, h, w].\n            reg_branches (obj:`nn.ModuleList`): Regression heads for\n                feature maps from each decoder layer. Only would\n                be passed when\n                `with_box_refine` is True. Default to None.\n            cls_branches (obj:`nn.ModuleList`): Classification heads\n                for feature maps from each decoder layer. Only would\n                 be passed when `as_two_stage`\n                 is True. Default to None.\n\n\n        Returns:\n            tuple[Tensor]: results of decoder containing the following tensor.\n\n                - inter_states: Outputs from decoder. 
If\n                    return_intermediate_dec is True output has shape \\\n                      (num_dec_layers, bs, num_query, embed_dims), else has \\\n                      shape (1, bs, num_query, embed_dims).\n                - init_reference_out: The initial value of reference \\\n                    points, has shape (bs, num_queries, 4).\n                - inter_references_out: The internal value of reference \\\n                    points in decoder, has shape \\\n                    (num_dec_layers, bs,num_query, embed_dims)\n                - enc_outputs_class: The classification score of \\\n                    proposals generated from \\\n                    encoder's feature maps, has shape \\\n                    (batch, h*w, num_classes). \\\n                    Only would be returned when `as_two_stage` is True, \\\n                    otherwise None.\n                - enc_outputs_coord_unact: The regression results \\\n                    generated from encoder's feature maps., has shape \\\n                    (batch, h*w, 4). Only would \\\n                    be returned when `as_two_stage` is True, \\\n                    otherwise None.\n        \"\"\"\n        assert self.as_two_stage or query_embed is not None\n\n        feat_flatten = []\n        mask_flatten = []\n        lvl_pos_embed_flatten = []\n        spatial_shapes = []\n        for lvl, (feat, mask, pos_embed) in enumerate(\n                zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):\n            bs, c, h, w = feat.shape\n            spatial_shape = (h, w)\n            spatial_shapes.append(spatial_shape)\n            feat = feat.flatten(2).transpose(1, 2)\n            mask = mask.flatten(1)\n            pos_embed = pos_embed.flatten(2).transpose(1, 2)\n            lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n            lvl_pos_embed_flatten.append(lvl_pos_embed)\n            feat_flatten.append(feat)\n            mask_flatten.append(mask)\n        feat_flatten = torch.cat(feat_flatten, 1)\n        mask_flatten = torch.cat(mask_flatten, 1)\n        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n        spatial_shapes = torch.as_tensor(\n            spatial_shapes, dtype=torch.long, device=feat_flatten.device)\n        level_start_index = torch.cat((spatial_shapes.new_zeros(\n            (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n        valid_ratios = torch.stack(\n            [self.get_valid_ratio(m) for m in mlvl_masks], 1)\n\n        reference_points = \\\n            self.get_reference_points(spatial_shapes,\n                                      valid_ratios,\n                                      device=feat.device)\n\n        feat_flatten = feat_flatten.permute(1, 0, 2)  # (H*W, bs, embed_dims)\n        lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute(\n            1, 0, 2)  # (H*W, bs, embed_dims)\n        memory = self.encoder(\n            query=feat_flatten,\n            key=None,\n            value=None,\n            query_pos=lvl_pos_embed_flatten,\n            query_key_padding_mask=mask_flatten,\n            spatial_shapes=spatial_shapes,\n            reference_points=reference_points,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios,\n            **kwargs)\n\n        memory = memory.permute(1, 0, 2)\n        bs, _, c = memory.shape\n        if self.as_two_stage:\n            output_memory, output_proposals = \\\n                self.gen_encoder_output_proposals(\n                    memory, 
mask_flatten, spatial_shapes)\n            enc_outputs_class = cls_branches[self.decoder.num_layers](\n                output_memory)\n            enc_outputs_coord_unact = \\\n                reg_branches[\n                    self.decoder.num_layers](output_memory) + output_proposals\n\n            topk = self.two_stage_num_proposals\n            # We only use the first channel in enc_outputs_class as foreground,\n            # the other (num_classes - 1) channels are actually not used.\n            # Its targets are set to be 0s, which indicates the first\n            # class (foreground) because we use [0, num_classes - 1] to\n            # indicate class labels, background class is indicated by\n            # num_classes (similar convention in RPN).\n            # See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa\n            # This follows the official implementation of Deformable DETR.\n            topk_proposals = torch.topk(\n                enc_outputs_class[..., 0], topk, dim=1)[1]\n            topk_coords_unact = torch.gather(\n                enc_outputs_coord_unact, 1,\n                topk_proposals.unsqueeze(-1).repeat(1, 1, 4))\n            topk_coords_unact = topk_coords_unact.detach()\n            reference_points = topk_coords_unact.sigmoid()\n            init_reference_out = reference_points\n            pos_trans_out = self.pos_trans_norm(\n                self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))\n            query_pos, query = torch.split(pos_trans_out, c, dim=2)\n        else:\n            query_pos, query = torch.split(query_embed, c, dim=1)\n            query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)\n            query = query.unsqueeze(0).expand(bs, -1, -1)\n            reference_points = self.reference_points(query_pos).sigmoid()\n            init_reference_out = reference_points\n\n        # decoder\n        query = query.permute(1, 0, 2)\n        memory = memory.permute(1, 0, 2)\n        query_pos = query_pos.permute(1, 0, 2)\n        inter_states, inter_references = self.decoder(\n            query=query,\n            key=None,\n            value=memory,\n            query_pos=query_pos,\n            key_padding_mask=mask_flatten,\n            reference_points=reference_points,\n            spatial_shapes=spatial_shapes,\n            level_start_index=level_start_index,\n            valid_ratios=valid_ratios,\n            reg_branches=reg_branches,\n            **kwargs)\n\n        inter_references_out = inter_references\n        if self.as_two_stage:\n            return inter_states, init_reference_out,\\\n                inter_references_out, enc_outputs_class,\\\n                enc_outputs_coord_unact\n        return inter_states, init_reference_out, \\\n            inter_references_out, None, None\n\n\n@TRANSFORMER.register_module()\nclass DynamicConv(BaseModule):\n    \"\"\"Implements Dynamic Convolution.\n\n    This module generate parameters for each sample and\n    use bmm to implement 1*1 convolution. 
Code is modified\n    from the `official github repo <https://github.com/PeizeSun/\n    SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ .\n\n    Args:\n        in_channels (int): The input feature channel.\n            Defaults to 256.\n        feat_channels (int): The inner feature channel.\n            Defaults to 64.\n        out_channels (int, optional): The output feature channel.\n            When not specified, it will be set to `in_channels`\n            by default\n        input_feat_shape (int): The shape of input feature.\n            Defaults to 7.\n        with_proj (bool): Project two-dimentional feature to\n            one-dimentional feature. Default to True.\n        act_cfg (dict): The activation config for DynamicConv.\n        norm_cfg (dict): Config dict for normalization layer. Default\n            layer normalization.\n        init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.\n            Default: None.\n    \"\"\"\n\n    def __init__(self,\n                 in_channels=256,\n                 feat_channels=64,\n                 out_channels=None,\n                 input_feat_shape=7,\n                 with_proj=True,\n                 act_cfg=dict(type='ReLU', inplace=True),\n                 norm_cfg=dict(type='LN'),\n                 init_cfg=None):\n        super(DynamicConv, self).__init__(init_cfg)\n        self.in_channels = in_channels\n        self.feat_channels = feat_channels\n        self.out_channels_raw = out_channels\n        self.input_feat_shape = input_feat_shape\n        self.with_proj = with_proj\n        self.act_cfg = act_cfg\n        self.norm_cfg = norm_cfg\n        self.out_channels = out_channels if out_channels else in_channels\n\n        self.num_params_in = self.in_channels * self.feat_channels\n        self.num_params_out = self.out_channels * self.feat_channels\n        self.dynamic_layer = nn.Linear(\n            self.in_channels, self.num_params_in + self.num_params_out)\n\n        self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]\n        self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n        self.activation = build_activation_layer(act_cfg)\n\n        num_output = self.out_channels * input_feat_shape**2\n        if self.with_proj:\n            self.fc_layer = nn.Linear(num_output, self.out_channels)\n            self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]\n\n    def forward(self, param_feature, input_feature):\n        \"\"\"Forward function for `DynamicConv`.\n\n        Args:\n            param_feature (Tensor): The feature can be used\n                to generate the parameter, has shape\n                (num_all_proposals, in_channels).\n            input_feature (Tensor): Feature that\n                interact with parameters, has shape\n                (num_all_proposals, in_channels, H, W).\n\n        Returns:\n            Tensor: The output feature has shape\n            (num_all_proposals, out_channels).\n        \"\"\"\n        input_feature = input_feature.flatten(2).permute(2, 0, 1)\n\n        input_feature = input_feature.permute(1, 0, 2)\n        parameters = self.dynamic_layer(param_feature)\n\n        param_in = parameters[:, :self.num_params_in].view(\n            -1, self.in_channels, self.feat_channels)\n        param_out = parameters[:, -self.num_params_out:].view(\n            -1, self.feat_channels, self.out_channels)\n\n        # input_feature has shape (num_all_proposals, H*W, in_channels)\n        # param_in has shape 
(num_all_proposals, in_channels, feat_channels)\n        # feature has shape (num_all_proposals, H*W, feat_channels)\n        features = torch.bmm(input_feature, param_in)\n        features = self.norm_in(features)\n        features = self.activation(features)\n\n        # param_out has shape (batch_size, feat_channels, out_channels)\n        features = torch.bmm(features, param_out)\n        features = self.norm_out(features)\n        features = self.activation(features)\n\n        if self.with_proj:\n            features = features.flatten(1)\n            features = self.fc_layer(features)\n            features = self.fc_norm(features)\n            features = self.activation(features)\n\n        return features\n"
  },
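  {
    "path": "hypothetical_examples/inverse_sigmoid_sketch.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream codebase: it shows\n# the round-trip behaviour of `inverse_sigmoid` from the transformer module\n# above. The import path `mmdet.models.utils.transformer` is an assumption;\n# adjust it if your checkout differs.\nimport torch\n\nfrom mmdet.models.utils.transformer import inverse_sigmoid\n\n\ndef main():\n    # Round-trip property: sigmoid(inverse_sigmoid(x)) recovers x for\n    # values safely inside (eps, 1 - eps).\n    x = torch.tensor([0.1, 0.25, 0.5, 0.75, 0.9])\n    assert torch.allclose(torch.sigmoid(inverse_sigmoid(x)), x, atol=1e-4)\n\n    # Out-of-range inputs are clamped to [0, 1] first, so the result stays\n    # finite instead of producing inf or nan.\n    z = inverse_sigmoid(torch.tensor([-1.0, 0.0, 1.0, 2.0]))\n    assert torch.isfinite(z).all()\n    print('inverse_sigmoid round-trip check passed')\n\n\nif __name__ == '__main__':\n    main()\n"
  },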
  {
    "path": "mmdet/utils/__init__.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .ascend_util import (batch_images_to_levels,\n                          get_max_num_gt_division_factor, masked_fill)\nfrom .collect_env import collect_env\nfrom .compat_config import compat_cfg\nfrom .logger import get_caller_name, get_root_logger, log_img_scale\nfrom .memory import AvoidCUDAOOM, AvoidOOM\nfrom .misc import find_latest_checkpoint, update_data_root\nfrom .replace_cfg_vals import replace_cfg_vals\nfrom .rfnext import rfnext_init_model\nfrom .setup_env import setup_multi_processes\nfrom .split_batch import split_batch\nfrom .util_distribution import build_ddp, build_dp, get_device\n\n__all__ = [\n    'get_root_logger', 'collect_env', 'find_latest_checkpoint',\n    'update_data_root', 'setup_multi_processes', 'get_caller_name',\n    'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',\n    'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',\n    'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels',\n    'rfnext_init_model'\n]\n"
  },
  {
    "path": "mmdet/utils/ascend_util.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef masked_fill(ori_tensor, mask, new_value, neg=False):\n    \"\"\"Fill ``ori_tensor`` with ``new_value`` according to ``mask``.\n\n    Args:\n        ori_tensor (Tensor): Input tensor.\n        mask (Tensor): Mask that selects where ``new_value`` is used.\n        new_value (Tensor | scalar): Value written into ``ori_tensor``.\n        neg (bool): If True, keep ``ori_tensor`` where the mask is set and\n            write ``new_value`` elsewhere. If False, do the opposite.\n            Default: False.\n    Returns:\n        Tensor: ``ori_tensor`` filled with ``new_value`` according to\n            ``mask``.\n    \"\"\"\n    if mask is None:\n        return ori_tensor\n    else:\n        if neg:\n            return ori_tensor * mask + new_value * (1 - mask)\n        else:\n            return ori_tensor * (1 - mask) + new_value * mask\n\n\ndef batch_images_to_levels(target, num_levels):\n    \"\"\"Convert targets by image to targets by feature level.\n\n    [target_img0, target_img1] -> [target_level0, target_level1, ...]  or\n    target_imgs -> [target_level0, target_level1, ...]\n\n    Args:\n        target (Tensor | List[Tensor]): Per-image targets to be regrouped.\n        num_levels (List[int]): Number of targets on each feature level.\n    Returns:\n        level_targets (list[Tensor]): Targets regrouped by feature level.\n    \"\"\"\n    if not isinstance(target, torch.Tensor):\n        target = torch.stack(target, 0)\n    level_targets = []\n    start = 0\n    for n in num_levels:\n        end = start + n\n        # level_targets.append(target[:, start:end].squeeze(0))\n        level_targets.append(target[:, start:end])\n        start = end\n    return level_targets\n\n\ndef get_max_num_gt_division_factor(gt_nums,\n                                   min_num_gt=32,\n                                   max_num_gt=1024,\n                                   division_factor=2):\n    \"\"\"Compute the aligned maximum number of ground truth bboxes.\n\n    Args:\n        gt_nums (List[int]): Number of ground truth bboxes in each image.\n        min_num_gt (int): Minimum aligned gt number. Default: 32.\n        max_num_gt (int): Maximum allowed aligned gt number. Default: 1024.\n        division_factor (int): Factor by which the aligned number grows.\n            Default: 2.\n    Returns:\n        max_gt_nums_align (int): The smallest value obtained by repeatedly\n            multiplying ``min_num_gt`` by ``division_factor`` that is not\n            smaller than ``max(gt_nums)``.\n    \"\"\"\n    max_gt_nums = max(gt_nums)\n    max_gt_nums_align = min_num_gt\n    while max_gt_nums_align < max_gt_nums:\n        max_gt_nums_align *= division_factor\n    if max_gt_nums_align > max_num_gt:\n        raise RuntimeError(\n            f'The aligned gt number {max_gt_nums_align} exceeds '\n            f'max_num_gt {max_num_gt}.')\n    return max_gt_nums_align\n"
  },
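  {
    "path": "hypothetical_examples/ascend_util_sketch.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream codebase: it\n# exercises two helpers from mmdet/utils/ascend_util.py through the exports\n# listed in mmdet/utils/__init__.py above. The toy values are illustrative.\nimport torch\n\nfrom mmdet.utils import get_max_num_gt_division_factor, masked_fill\n\n\ndef main():\n    # masked_fill keeps the original value where mask == 0 and writes\n    # new_value where mask == 1 (the roles swap when neg=True).\n    ori = torch.tensor([1.0, 2.0, 3.0, 4.0])\n    mask = torch.tensor([0.0, 1.0, 0.0, 1.0])\n    out = masked_fill(ori, mask, new_value=0.0)\n    assert torch.equal(out, torch.tensor([1.0, 0.0, 3.0, 0.0]))\n\n    # The aligned gt number starts at min_num_gt (32) and doubles until it\n    # covers the largest per-image gt count, e.g. 70 gts -> 128.\n    assert get_max_num_gt_division_factor([3, 70, 12]) == 128\n    print('ascend_util checks passed')\n\n\nif __name__ == '__main__':\n    main()\n"
  },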
  {
    "path": "mmdet/utils/collect_env.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import collect_env as collect_base_env\nfrom mmcv.utils import get_git_hash\n\nimport mmdet\n\n\ndef collect_env():\n    \"\"\"Collect the information of the running environments.\"\"\"\n    env_info = collect_base_env()\n    env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]\n    return env_info\n\n\nif __name__ == '__main__':\n    for name, val in collect_env().items():\n        print(f'{name}: {val}')\n"
  },
  {
    "path": "mmdet/utils/compat_config.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nfrom mmcv import ConfigDict\n\n\ndef compat_cfg(cfg):\n    \"\"\"This function would modify some filed to keep the compatibility of\n    config.\n\n    For example, it will move some args which will be deprecated to the correct\n    fields.\n    \"\"\"\n    cfg = copy.deepcopy(cfg)\n    cfg = compat_imgs_per_gpu(cfg)\n    cfg = compat_loader_args(cfg)\n    cfg = compat_runner_args(cfg)\n    return cfg\n\n\ndef compat_runner_args(cfg):\n    if 'runner' not in cfg:\n        cfg.runner = ConfigDict({\n            'type': 'EpochBasedRunner',\n            'max_epochs': cfg.total_epochs\n        })\n        warnings.warn(\n            'config is now expected to have a `runner` section, '\n            'please set `runner` in your config.', UserWarning)\n    else:\n        if 'total_epochs' in cfg:\n            assert cfg.total_epochs == cfg.runner.max_epochs\n    return cfg\n\n\ndef compat_imgs_per_gpu(cfg):\n    cfg = copy.deepcopy(cfg)\n    if 'imgs_per_gpu' in cfg.data:\n        warnings.warn('\"imgs_per_gpu\" is deprecated in MMDet V2.0. '\n                      'Please use \"samples_per_gpu\" instead')\n        if 'samples_per_gpu' in cfg.data:\n            warnings.warn(\n                f'Got \"imgs_per_gpu\"={cfg.data.imgs_per_gpu} and '\n                f'\"samples_per_gpu\"={cfg.data.samples_per_gpu}, \"imgs_per_gpu\"'\n                f'={cfg.data.imgs_per_gpu} is used in this experiments')\n        else:\n            warnings.warn('Automatically set \"samples_per_gpu\"=\"imgs_per_gpu\"='\n                          f'{cfg.data.imgs_per_gpu} in this experiments')\n        cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu\n    return cfg\n\n\ndef compat_loader_args(cfg):\n    \"\"\"Deprecated sample_per_gpu in cfg.data.\"\"\"\n\n    cfg = copy.deepcopy(cfg)\n    if 'train_dataloader' not in cfg.data:\n        cfg.data['train_dataloader'] = ConfigDict()\n    if 'val_dataloader' not in cfg.data:\n        cfg.data['val_dataloader'] = ConfigDict()\n    if 'test_dataloader' not in cfg.data:\n        cfg.data['test_dataloader'] = ConfigDict()\n\n    # special process for train_dataloader\n    if 'samples_per_gpu' in cfg.data:\n\n        samples_per_gpu = cfg.data.pop('samples_per_gpu')\n        assert 'samples_per_gpu' not in \\\n               cfg.data.train_dataloader, ('`samples_per_gpu` are set '\n                                           'in `data` field and ` '\n                                           'data.train_dataloader` '\n                                           'at the same time. '\n                                           'Please only set it in '\n                                           '`data.train_dataloader`. ')\n        cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu\n\n    if 'persistent_workers' in cfg.data:\n\n        persistent_workers = cfg.data.pop('persistent_workers')\n        assert 'persistent_workers' not in \\\n               cfg.data.train_dataloader, ('`persistent_workers` are set '\n                                           'in `data` field and ` '\n                                           'data.train_dataloader` '\n                                           'at the same time. '\n                                           'Please only set it in '\n                                           '`data.train_dataloader`. 
')\n        cfg.data.train_dataloader['persistent_workers'] = persistent_workers\n\n    if 'workers_per_gpu' in cfg.data:\n\n        workers_per_gpu = cfg.data.pop('workers_per_gpu')\n        cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu\n        cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu\n        cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu\n\n    # special process for val_dataloader\n    if 'samples_per_gpu' in cfg.data.val:\n        # keep default value of `sample_per_gpu` is 1\n        assert 'samples_per_gpu' not in \\\n               cfg.data.val_dataloader, ('`samples_per_gpu` are set '\n                                         'in `data.val` field and ` '\n                                         'data.val_dataloader` at '\n                                         'the same time. '\n                                         'Please only set it in '\n                                         '`data.val_dataloader`. ')\n        cfg.data.val_dataloader['samples_per_gpu'] = \\\n            cfg.data.val.pop('samples_per_gpu')\n    # special process for val_dataloader\n\n    # in case the test dataset is concatenated\n    if isinstance(cfg.data.test, dict):\n        if 'samples_per_gpu' in cfg.data.test:\n            assert 'samples_per_gpu' not in \\\n                   cfg.data.test_dataloader, ('`samples_per_gpu` are set '\n                                              'in `data.test` field and ` '\n                                              'data.test_dataloader` '\n                                              'at the same time. '\n                                              'Please only set it in '\n                                              '`data.test_dataloader`. ')\n\n            cfg.data.test_dataloader['samples_per_gpu'] = \\\n                cfg.data.test.pop('samples_per_gpu')\n\n    elif isinstance(cfg.data.test, list):\n        for ds_cfg in cfg.data.test:\n            if 'samples_per_gpu' in ds_cfg:\n                assert 'samples_per_gpu' not in \\\n                       cfg.data.test_dataloader, ('`samples_per_gpu` are set '\n                                                  'in `data.test` field and ` '\n                                                  'data.test_dataloader` at'\n                                                  ' the same time. '\n                                                  'Please only set it in '\n                                                  '`data.test_dataloader`. ')\n        samples_per_gpu = max(\n            [ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])\n        cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu\n\n    return cfg\n"
  },
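  {
    "path": "hypothetical_examples/compat_cfg_sketch.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream codebase: it shows\n# compat_cfg (mmdet/utils/compat_config.py above) moving the deprecated\n# top-level samples_per_gpu / workers_per_gpu keys into the per-split\n# dataloader configs. The toy config values are illustrative only.\nfrom mmcv import Config\n\nfrom mmdet.utils import compat_cfg\n\n\ndef main():\n    cfg = Config(\n        dict(\n            runner=dict(type='EpochBasedRunner', max_epochs=12),\n            data=dict(\n                samples_per_gpu=2,\n                workers_per_gpu=2,\n                train=dict(),\n                val=dict(),\n                test=dict())))\n    cfg = compat_cfg(cfg)\n    # The deprecated keys are relocated into data.*_dataloader.\n    assert cfg.data.train_dataloader.samples_per_gpu == 2\n    assert cfg.data.test_dataloader.workers_per_gpu == 2\n    print(cfg.data.train_dataloader)\n\n\nif __name__ == '__main__':\n    main()\n"
  },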
  {
    "path": "mmdet/utils/contextmanagers.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport asyncio\nimport contextlib\nimport logging\nimport os\nimport time\nfrom typing import List\n\nimport torch\n\nlogger = logging.getLogger(__name__)\n\nDEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))\n\n\n@contextlib.asynccontextmanager\nasync def completed(trace_name='',\n                    name='',\n                    sleep_interval=0.05,\n                    streams: List[torch.cuda.Stream] = None):\n    \"\"\"Async context manager that waits for work to complete on given CUDA\n    streams.\"\"\"\n    if not torch.cuda.is_available():\n        yield\n        return\n\n    stream_before_context_switch = torch.cuda.current_stream()\n    if not streams:\n        streams = [stream_before_context_switch]\n    else:\n        streams = [s if s else stream_before_context_switch for s in streams]\n\n    end_events = [\n        torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams\n    ]\n\n    if DEBUG_COMPLETED_TIME:\n        start = torch.cuda.Event(enable_timing=True)\n        stream_before_context_switch.record_event(start)\n\n        cpu_start = time.monotonic()\n    logger.debug('%s %s starting, streams: %s', trace_name, name, streams)\n    grad_enabled_before = torch.is_grad_enabled()\n    try:\n        yield\n    finally:\n        current_stream = torch.cuda.current_stream()\n        assert current_stream == stream_before_context_switch\n\n        if DEBUG_COMPLETED_TIME:\n            cpu_end = time.monotonic()\n        for i, stream in enumerate(streams):\n            event = end_events[i]\n            stream.record_event(event)\n\n        grad_enabled_after = torch.is_grad_enabled()\n\n        # observed change of torch.is_grad_enabled() during concurrent run of\n        # async_test_bboxes code\n        assert (grad_enabled_before == grad_enabled_after\n                ), 'Unexpected is_grad_enabled() value change'\n\n        are_done = [e.query() for e in end_events]\n        logger.debug('%s %s completed: %s streams: %s', trace_name, name,\n                     are_done, streams)\n        with torch.cuda.stream(stream_before_context_switch):\n            while not all(are_done):\n                await asyncio.sleep(sleep_interval)\n                are_done = [e.query() for e in end_events]\n                logger.debug(\n                    '%s %s completed: %s streams: %s',\n                    trace_name,\n                    name,\n                    are_done,\n                    streams,\n                )\n\n        current_stream = torch.cuda.current_stream()\n        assert current_stream == stream_before_context_switch\n\n        if DEBUG_COMPLETED_TIME:\n            cpu_time = (cpu_end - cpu_start) * 1000\n            stream_times_ms = ''\n            for i, stream in enumerate(streams):\n                elapsed_time = start.elapsed_time(end_events[i])\n                stream_times_ms += f' {stream} {elapsed_time:.2f} ms'\n            logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,\n                        stream_times_ms)\n\n\n@contextlib.asynccontextmanager\nasync def concurrent(streamqueue: asyncio.Queue,\n                     trace_name='concurrent',\n                     name='stream'):\n    \"\"\"Run code concurrently in different streams.\n\n    :param streamqueue: asyncio.Queue instance.\n\n    Queue tasks define the pool of streams used for concurrent execution.\n    \"\"\"\n    if not torch.cuda.is_available():\n        yield\n        
return\n\n    initial_stream = torch.cuda.current_stream()\n\n    with torch.cuda.stream(initial_stream):\n        stream = await streamqueue.get()\n        assert isinstance(stream, torch.cuda.Stream)\n\n        try:\n            with torch.cuda.stream(stream):\n                logger.debug('%s %s is starting, stream: %s', trace_name, name,\n                             stream)\n                yield\n                current = torch.cuda.current_stream()\n                assert current == stream\n                logger.debug('%s %s has finished, stream: %s', trace_name,\n                             name, stream)\n        finally:\n            streamqueue.task_done()\n            streamqueue.put_nowait(stream)\n"
  },
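  {
    "path": "hypothetical_examples/contextmanagers_sketch.py",
    "content": "# Hypothetical usage sketch, NOT part of the upstream codebase: it shows\n# the async `completed` context manager from mmdet/utils/contextmanagers.py.\n# On a machine without CUDA it simply yields and returns without touching\n# any stream; with CUDA it waits for the recorded events before exiting.\nimport asyncio\n\nimport torch\n\nfrom mmdet.utils.contextmanagers import completed\n\n\nasync def main():\n    async with completed('demo', 'matmul'):\n        # Any (possibly stream-ordered) work would go here.\n        a = torch.randn(4, 4)\n        b = a @ a\n    print('completed() exited, result shape:', tuple(b.shape))\n\n\nif __name__ == '__main__':\n    asyncio.run(main())\n"
  },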
  {
    "path": "mmdet/utils/logger.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport inspect\nimport logging\n\nfrom mmcv.utils import get_logger\n\n\ndef get_root_logger(log_file=None, log_level=logging.INFO):\n    \"\"\"Get root logger.\n\n    Args:\n        log_file (str, optional): File path of log. Defaults to None.\n        log_level (int, optional): The level of logger.\n            Defaults to logging.INFO.\n\n    Returns:\n        :obj:`logging.Logger`: The obtained logger\n    \"\"\"\n    logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)\n\n    return logger\n\n\ndef get_caller_name():\n    \"\"\"Get name of caller method.\"\"\"\n    # this_func_frame = inspect.stack()[0][0]  # i.e., get_caller_name\n    # callee_frame = inspect.stack()[1][0]  # e.g., log_img_scale\n    caller_frame = inspect.stack()[2][0]  # e.g., caller of log_img_scale\n    caller_method = caller_frame.f_code.co_name\n    try:\n        caller_class = caller_frame.f_locals['self'].__class__.__name__\n        return f'{caller_class}.{caller_method}'\n    except KeyError:  # caller is a function\n        return caller_method\n\n\ndef log_img_scale(img_scale, shape_order='hw', skip_square=False):\n    \"\"\"Log image size.\n\n    Args:\n        img_scale (tuple): Image size to be logged.\n        shape_order (str, optional): The order of image shape.\n            'hw' for (height, width) and 'wh' for (width, height).\n            Defaults to 'hw'.\n        skip_square (bool, optional): Whether to skip logging for square\n            img_scale. Defaults to False.\n\n    Returns:\n        bool: Whether to have done logging.\n    \"\"\"\n    if shape_order == 'hw':\n        height, width = img_scale\n    elif shape_order == 'wh':\n        width, height = img_scale\n    else:\n        raise ValueError(f'Invalid shape_order {shape_order}.')\n\n    if skip_square and (height == width):\n        return False\n\n    logger = get_root_logger()\n    caller = get_caller_name()\n    logger.info(f'image shape: height={height}, width={width} in {caller}')\n\n    return True\n"
  },
  {
    "path": "mmdet/utils/memory.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom collections import abc\nfrom contextlib import contextmanager\nfrom functools import wraps\n\nimport torch\n\nfrom mmdet.utils import get_root_logger\n\n\ndef cast_tensor_type(inputs, src_type=None, dst_type=None):\n    \"\"\"Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.\n\n    Args:\n        inputs: Inputs to be cast.\n        src_type (torch.dtype | torch.device): Source type.\n        dst_type (torch.dtype | torch.device): Destination type.\n\n    Returns:\n        The same type as inputs, but all contained Tensors have been cast.\n    \"\"\"\n    assert dst_type is not None\n    if isinstance(inputs, torch.Tensor):\n        if isinstance(dst_type, torch.device):\n            # convert Tensor to dst_device\n            if hasattr(inputs, 'to') and \\\n                    hasattr(inputs, 'device') and \\\n                    (inputs.device == src_type or src_type is None):\n                return inputs.to(dst_type)\n            else:\n                return inputs\n        else:\n            # convert Tensor to dst_dtype\n            if hasattr(inputs, 'to') and \\\n                    hasattr(inputs, 'dtype') and \\\n                    (inputs.dtype == src_type or src_type is None):\n                return inputs.to(dst_type)\n            else:\n                return inputs\n        # we need to ensure that the type of inputs to be cast is the same\n        # as the argument `src_type`.\n    elif isinstance(inputs, abc.Mapping):\n        return type(inputs)({\n            k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)\n            for k, v in inputs.items()\n        })\n    elif isinstance(inputs, abc.Iterable):\n        return type(inputs)(\n            cast_tensor_type(item, src_type=src_type, dst_type=dst_type)\n            for item in inputs)\n    # TODO: Currently not supported\n    # elif isinstance(inputs, InstanceData):\n    #     for key, value in inputs.items():\n    #         inputs[key] = cast_tensor_type(\n    #             value, src_type=src_type, dst_type=dst_type)\n    #     return inputs\n    else:\n        return inputs\n\n\n@contextmanager\ndef _ignore_torch_cuda_oom():\n    \"\"\"A context manager which ignores CUDA OOM exceptions from PyTorch.\n\n    Code is modified from\n    <https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py>  # noqa: E501\n    \"\"\"\n    try:\n        yield\n    except RuntimeError as e:\n        # NOTE: the string may change?\n        if 'CUDA out of memory. ' in str(e):\n            pass\n        else:\n            raise\n\n\nclass AvoidOOM:\n    \"\"\"Try to convert inputs to FP16 or CPU if a PyTorch CUDA Out-of-Memory\n    error is encountered. It will do the following steps:\n\n        1. First retry after calling `torch.cuda.empty_cache()`.\n        2. If that still fails, it will then retry by converting inputs\n          to FP16.\n        3. If that still fails, it will try to convert inputs to CPU.\n          In this case, it expects the function to dispatch to a\n          CPU implementation.\n\n    Args:\n        to_cpu (bool): Whether to convert outputs to CPU if an OOM error\n            is encountered. This will slow down the code significantly.\n            Defaults to True.\n        test (bool): Skip `_ignore_torch_cuda_oom` so that the FP16 and CPU\n            fallback paths can be exercised with lightweight data; only\n            used in unit tests. Defaults to False.\n\n    Examples:\n        >>> from mmdet.utils.memory import AvoidOOM\n        >>> AvoidCUDAOOM = AvoidOOM()\n        >>> output = AvoidCUDAOOM.retry_if_cuda_oom(\n        >>>     some_torch_function)(input1, input2)\n        >>> # To use as a decorator\n        >>> # from mmdet.utils import AvoidCUDAOOM\n        >>> @AvoidCUDAOOM.retry_if_cuda_oom\n        >>> def function(*args, **kwargs):\n        >>>     return None\n\n    Note:\n        1. The output may be on CPU even if inputs are on GPU. Processing\n            on CPU will slow down the code significantly.\n        2. When converting inputs to CPU, it will only look at each argument\n            and check if it has `.device` and `.to` for conversion. Nested\n            structures of tensors are not supported.\n        3. Since the function might be called more than once, it has to be\n            stateless.\n    \"\"\"\n\n    def __init__(self, to_cpu=True, test=False):\n        self.to_cpu = to_cpu\n        self.test = test\n\n    def retry_if_cuda_oom(self, func):\n        \"\"\"Makes a function retry itself after encountering PyTorch's CUDA OOM\n        error.\n\n        The implementation logic is referenced from\n        https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py\n\n        Args:\n            func: a stateless callable that takes tensor-like objects\n                as arguments.\n        Returns:\n            func: a callable which retries `func` if OOM is encountered.\n        \"\"\"  # noqa: W605\n\n        @wraps(func)\n        def wrapped(*args, **kwargs):\n\n            # raw function\n            if not self.test:\n                with _ignore_torch_cuda_oom():\n                    return func(*args, **kwargs)\n\n                # Clear cache and retry\n                torch.cuda.empty_cache()\n                with _ignore_torch_cuda_oom():\n                    return func(*args, **kwargs)\n\n            # get the type and device of first tensor\n            dtype, device = None, None\n            values = args + tuple(kwargs.values())\n            for value in values:\n                if isinstance(value, torch.Tensor):\n                    dtype = value.dtype\n                    device = value.device\n                    break\n            if dtype is None or device is None:\n                raise ValueError('There is no tensor in the inputs, '\n                                 'cannot get dtype and device.')\n\n            # Convert to FP16\n            fp16_args = cast_tensor_type(args, dst_type=torch.half)\n            fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)\n            logger = get_root_logger()\n            logger.warning(f'Attempting to copy inputs of {str(func)} '\n                           'to FP16 due to CUDA OOM')\n\n            # get input tensor type, the output type will be the same as\n            # the first parameter type.\n            with _ignore_torch_cuda_oom():\n                output = func(*fp16_args, **fp16_kwargs)\n                output = cast_tensor_type(\n                    output, src_type=torch.half, dst_type=dtype)\n                if not self.test:\n                    return output\n            logger.warning('Using FP16 still meets CUDA OOM')\n\n            # Try on CPU. 
This will slow down the code significantly,\n            # therefore print a notice.\n            if self.to_cpu:\n                logger.warning(f'Attempting to copy inputs of {str(func)} '\n                               'to CPU due to CUDA OOM')\n                cpu_device = torch.empty(0).device\n                cpu_args = cast_tensor_type(args, dst_type=cpu_device)\n                cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)\n\n                # convert outputs to GPU\n                with _ignore_torch_cuda_oom():\n                    logger.warning(f'Convert outputs to GPU (device={device})')\n                    output = func(*cpu_args, **cpu_kwargs)\n                    output = cast_tensor_type(\n                        output, src_type=cpu_device, dst_type=device)\n                    return output\n\n                warnings.warn('Cannot convert output to GPU due to CUDA OOM, '\n                              'the output is now on CPU, which might cause '\n                              'errors if the output need to interact with GPU '\n                              'data in subsequent operations')\n                logger.warning('Cannot convert output to GPU due to '\n                               'CUDA OOM, the output is on CPU now.')\n\n                return func(*cpu_args, **cpu_kwargs)\n            else:\n                # may still get CUDA OOM error\n                return func(*args, **kwargs)\n\n        return wrapped\n\n\n# To use AvoidOOM as a decorator\nAvoidCUDAOOM = AvoidOOM()\n"
  },
  {
    "path": "mmdet/utils/misc.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport glob\nimport os\nimport os.path as osp\nimport warnings\n\nimport mmcv\nimport torch\nfrom mmcv.utils import TORCH_VERSION, digit_version, print_log\n\n\ndef find_latest_checkpoint(path, suffix='pth'):\n    \"\"\"Find the latest checkpoint from the working directory.\n\n    Args:\n        path (str): The path to find checkpoints.\n        suffix (str): File extension. Defaults to 'pth'.\n\n    Returns:\n        latest_path (str | None): File path of the latest checkpoint.\n\n    References:\n        .. [1] https://github.com/microsoft/SoftTeacher\n                  /blob/main/ssod/utils/patch.py\n    \"\"\"\n    if not osp.exists(path):\n        warnings.warn('The path of checkpoints does not exist.')\n        return None\n    if osp.exists(osp.join(path, f'latest.{suffix}')):\n        return osp.join(path, f'latest.{suffix}')\n\n    checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))\n    if len(checkpoints) == 0:\n        warnings.warn('There are no checkpoints in the path.')\n        return None\n    latest = -1\n    latest_path = None\n    for checkpoint in checkpoints:\n        count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])\n        if count > latest:\n            latest = count\n            latest_path = checkpoint\n    return latest_path\n\n\ndef update_data_root(cfg, logger=None):\n    \"\"\"Update data root according to env MMDET_DATASETS.\n\n    If the environment variable MMDET_DATASETS is set, update cfg.data_root\n    accordingly. Otherwise, keep cfg.data_root unchanged.\n\n    Args:\n        cfg (mmcv.Config): The model config to be modified.\n        logger (logging.Logger | str | None): The logger used to print\n            messages.\n    \"\"\"\n    assert isinstance(cfg, mmcv.Config), \\\n        f'cfg got wrong type: {type(cfg)}, expected mmcv.Config'\n\n    if 'MMDET_DATASETS' in os.environ:\n        dst_root = os.environ['MMDET_DATASETS']\n        print_log(\n            f'MMDET_DATASETS has been set to be {dst_root}. '\n            f'Using {dst_root} as data root.',\n            logger=logger)\n    else:\n        return\n\n    def update(cfg, src_str, dst_str):\n        for k, v in cfg.items():\n            if isinstance(v, mmcv.ConfigDict):\n                update(cfg[k], src_str, dst_str)\n            if isinstance(v, str) and src_str in v:\n                cfg[k] = v.replace(src_str, dst_str)\n\n    update(cfg.data, cfg.data_root, dst_root)\n    cfg.data_root = dst_root\n\n\n_torch_version_div_indexing = (\n    'parrots' not in TORCH_VERSION\n    and digit_version(TORCH_VERSION) >= digit_version('1.8'))\n\n\ndef floordiv(dividend, divisor, rounding_mode='trunc'):\n    if _torch_version_div_indexing:\n        return torch.div(dividend, divisor, rounding_mode=rounding_mode)\n    else:\n        return dividend // divisor\n"
  },
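A minimal usage sketch for the checkpoint helper above, assuming MMDetection's usual `epoch_<N>.pth` naming (the `work_dirs/demo` path is a hypothetical example):

from mmdet.utils.misc import find_latest_checkpoint

# `latest.pth` wins if present; otherwise the checkpoint with the largest
# trailing number is returned, e.g. epoch_12.pth over epoch_3.pth.
resume_from = find_latest_checkpoint('work_dirs/demo', suffix='pth')
if resume_from is not None:
    print(f'Resuming from {resume_from}')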
  {
    "path": "mmdet/utils/profiling.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport contextlib\nimport sys\nimport time\n\nimport torch\n\nif sys.version_info >= (3, 7):\n\n    @contextlib.contextmanager\n    def profile_time(trace_name,\n                     name,\n                     enabled=True,\n                     stream=None,\n                     end_stream=None):\n        \"\"\"Print time spent by CPU and GPU.\n\n        Useful as a temporary context manager to find sweet spots of code\n        suitable for async implementation.\n        \"\"\"\n        if (not enabled) or not torch.cuda.is_available():\n            yield\n            return\n        stream = stream if stream else torch.cuda.current_stream()\n        end_stream = end_stream if end_stream else stream\n        start = torch.cuda.Event(enable_timing=True)\n        end = torch.cuda.Event(enable_timing=True)\n        stream.record_event(start)\n        try:\n            cpu_start = time.monotonic()\n            yield\n        finally:\n            cpu_end = time.monotonic()\n            end_stream.record_event(end)\n            end.synchronize()\n            cpu_time = (cpu_end - cpu_start) * 1000\n            gpu_time = start.elapsed_time(end)\n            msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '\n            msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'\n            print(msg, end_stream)\n"
  },
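A small sketch of how `profile_time` might be used; the context manager is a no-op when CUDA is unavailable or `enabled=False`:

import torch

from mmdet.utils.profiling import profile_time

x = torch.randn(256, 256)
# Prints CPU and GPU timings for the enclosed block (only when CUDA is available).
with profile_time('demo', 'matmul', enabled=True):
    y = x @ x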
  {
    "path": "mmdet/utils/replace_cfg_vals.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport re\n\nfrom mmcv.utils import Config\n\n\ndef replace_cfg_vals(ori_cfg):\n    \"\"\"Replace the string \"${key}\" with the corresponding value.\n\n    Replace \"${key}\" with the value of ori_cfg.key in the config, and\n    support chained keys: for example, \"${key0.key1}\" is replaced with\n    the value of cfg.key0.key1. Code is modified from `vars.py\n    <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_  # noqa: E501\n\n    Args:\n        ori_cfg (mmcv.utils.config.Config):\n            The origin config with \"${key}\" generated from a file.\n\n    Returns:\n        updated_cfg (mmcv.utils.config.Config):\n            The config with \"${key}\" replaced by the corresponding value.\n    \"\"\"\n\n    def get_value(cfg, key):\n        for k in key.split('.'):\n            cfg = cfg[k]\n        return cfg\n\n    def replace_value(cfg):\n        if isinstance(cfg, dict):\n            return {key: replace_value(value) for key, value in cfg.items()}\n        elif isinstance(cfg, list):\n            return [replace_value(item) for item in cfg]\n        elif isinstance(cfg, tuple):\n            return tuple([replace_value(item) for item in cfg])\n        elif isinstance(cfg, str):\n            # the format of string cfg may be:\n            # 1) \"${key}\", which will be replaced with cfg.key directly\n            # 2) \"xxx${key}xxx\" or \"xxx${key1}xxx${key2}xxx\",\n            # which will be replaced with the string form of cfg.key\n            keys = pattern_key.findall(cfg)\n            values = [get_value(ori_cfg, key[2:-1]) for key in keys]\n            if len(keys) == 1 and keys[0] == cfg:\n                # the format of string cfg is \"${key}\"\n                cfg = values[0]\n            else:\n                for key, value in zip(keys, values):\n                    # the format of string cfg is\n                    # \"xxx${key}xxx\" or \"xxx${key1}xxx${key2}xxx\"\n                    assert not isinstance(value, (dict, list, tuple)), \\\n                        f'since the format of the string cfg is ' \\\n                        f\"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', \" \\\n                        f\"the type of the value of '${key}' \" \\\n                        f'can not be dict, list, or tuple, ' \\\n                        f'but you input {type(value)} in {cfg}'\n                    cfg = cfg.replace(key, str(value))\n            return cfg\n        else:\n            return cfg\n\n    # the pattern of string \"${key}\"\n    pattern_key = re.compile(r'\\$\\{[a-zA-Z\\d_.]*\\}')\n    # the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict\n    updated_cfg = Config(\n        replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)\n    # replace the model with model_wrapper\n    if updated_cfg.get('model_wrapper', None) is not None:\n        updated_cfg.model = updated_cfg.model_wrapper\n        updated_cfg.pop('model_wrapper')\n    return updated_cfg\n"
  },
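A small sketch of the `${key}` substitution implemented above; the config values are made up for illustration:

from mmcv.utils import Config

from mmdet.utils.replace_cfg_vals import replace_cfg_vals

cfg = Config(
    dict(
        num_classes=80,
        model=dict(bbox_head=dict(num_classes='${num_classes}')),
        work_dir='work_dirs/exp_${num_classes}'))
cfg = replace_cfg_vals(cfg)
# A bare '${key}' keeps the value's original type, while a key embedded
# in a longer string is stringified.
assert cfg.model.bbox_head.num_classes == 80
assert cfg.work_dir == 'work_dirs/exp_80'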
  {
    "path": "mmdet/utils/rfnext.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\ntry:\n    from mmcv.cnn import RFSearchHook\nexcept ImportError:\n    RFSearchHook = None\n\n\ndef rfnext_init_model(detector, cfg):\n    \"\"\"Receptive field search via dilation rates.\n\n    Please refer to `RF-Next: Efficient Receptive Field\n    Search for Convolutional Neural Networks\n    <https://arxiv.org/abs/2206.06637>`_ for more details.\n\n    Args:\n        detector (nn.Module): The detector before initializing RF-Next.\n        cfg (mmcv.Config): The config for RF-Next.\n            If the RFSearchHook is defined in the cfg.custom_hooks,\n            the detector will be initialized for RF-Next.\n    \"\"\"\n\n    if cfg.get('custom_hooks', None) is None:\n        return\n    custom_hook_types = [hook['type'] for hook in cfg.custom_hooks]\n    if 'RFSearchHook' not in custom_hook_types:\n        return\n\n    index = custom_hook_types.index('RFSearchHook')\n    rfsearch_cfg = cfg.custom_hooks[index]\n    assert rfsearch_cfg['type'] == 'RFSearchHook'\n\n    assert RFSearchHook is not None, 'Please install mmcv > 1.7.0'\n\n    # initialize an RFSearchHook\n    rfsearch_wrap = RFSearchHook(\n        mode=rfsearch_cfg.get('mode', 'search'),\n        config=rfsearch_cfg.get('config', None),\n        rfstructure_file=rfsearch_cfg.get('rfstructure_file', None),\n        by_epoch=rfsearch_cfg.get('by_epoch', True),\n        verbose=rfsearch_cfg.get('verbose', True),\n    )\n    rfsearch_wrap.init_model(detector)\n    rfsearch_cfg['rfstructure_file'] = None\n"
  },
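RF-Next is driven entirely by the config: `rfnext_init_model` returns early unless an `RFSearchHook` entry appears in `custom_hooks`. A sketch of such an entry, limited to the keys the function actually reads (the values are illustrative placeholders, not a validated search config):

custom_hooks = [
    dict(
        type='RFSearchHook',
        mode='search',           # dilation-rate search mode
        config=None,             # placeholder; the real search-space dict is defined by mmcv's RFSearchHook
        rfstructure_file=None,   # optional pre-searched structure to load
        by_epoch=True,
        verbose=True)
]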
  {
    "path": "mmdet/utils/setup_env.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport platform\nimport warnings\n\nimport cv2\nimport torch.multiprocessing as mp\n\n\ndef setup_multi_processes(cfg):\n    \"\"\"Setup multi-processing environment variables.\"\"\"\n    # set multi-process start method as `fork` to speed up the training\n    if platform.system() != 'Windows':\n        mp_start_method = cfg.get('mp_start_method', 'fork')\n        current_method = mp.get_start_method(allow_none=True)\n        if current_method is not None and current_method != mp_start_method:\n            warnings.warn(\n                f'Multi-processing start method `{mp_start_method}` is '\n                f'different from the previous setting `{current_method}`. '\n                f'It will be forcibly set to `{mp_start_method}`. You can change '\n                f'this behavior by changing `mp_start_method` in your config.')\n        mp.set_start_method(mp_start_method, force=True)\n\n    # disable opencv multithreading to avoid system being overloaded\n    opencv_num_threads = cfg.get('opencv_num_threads', 0)\n    cv2.setNumThreads(opencv_num_threads)\n\n    # setup OMP threads\n    # This code is referenced from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py  # noqa\n    workers_per_gpu = cfg.data.get('workers_per_gpu', 1)\n    if 'train_dataloader' in cfg.data:\n        workers_per_gpu = \\\n            max(cfg.data.train_dataloader.get('workers_per_gpu', 1),\n                workers_per_gpu)\n\n    if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:\n        omp_num_threads = 1\n        warnings.warn(\n            f'Setting OMP_NUM_THREADS environment variable for each process '\n            f'to be {omp_num_threads} by default, to avoid your system being '\n            f'overloaded. Please further tune the variable for optimal '\n            f'performance in your application as needed.')\n        os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)\n\n    # setup MKL threads\n    if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:\n        mkl_num_threads = 1\n        warnings.warn(\n            f'Setting MKL_NUM_THREADS environment variable for each process '\n            f'to be {mkl_num_threads} by default, to avoid your system being '\n            f'overloaded. Please further tune the variable for optimal '\n            f'performance in your application as needed.')\n        os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)\n"
  },
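`setup_multi_processes` only reads three config fields; a minimal sketch with an in-memory config (the values mirror the defaults used in this repo's configs):

from mmcv import Config

from mmdet.utils.setup_env import setup_multi_processes

cfg = Config(
    dict(
        mp_start_method='fork',
        opencv_num_threads=0,
        data=dict(workers_per_gpu=2)))
# Sets the multiprocessing start method, disables OpenCV threading and,
# if still unset, pins OMP/MKL thread counts for dataloader workers.
setup_multi_processes(cfg)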
  {
    "path": "mmdet/utils/split_batch.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef split_batch(img, img_metas, kwargs):\n    \"\"\"Split data_batch by tags.\n\n    Code is modified from\n    <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/structure_utils.py> # noqa: E501\n\n    Args:\n        img (Tensor): of shape (N, C, H, W) encoding input images.\n            Typically these should be mean centered and std scaled.\n        img_metas (list[dict]): List of image info dict where each dict\n            has: 'img_shape', 'scale_factor', 'flip', and may also contain\n            'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n            For details on the values of these keys, see\n            :class:`mmdet.datasets.pipelines.Collect`.\n        kwargs (dict): Specific to concrete implementation.\n\n    Returns:\n        data_groups (dict): a dict in which data_batch is split by tag,\n            such as 'sup', 'unsup_teacher', and 'unsup_student'.\n    \"\"\"\n\n    # only stack img in the batch\n    def fuse_list(obj_list, obj):\n        return torch.stack(obj_list) if isinstance(obj,\n                                                   torch.Tensor) else obj_list\n\n    # select data with tag from data_batch\n    def select_group(data_batch, current_tag):\n        group_flag = [tag == current_tag for tag in data_batch['tag']]\n        return {\n            k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v)\n            for k, v in data_batch.items()\n        }\n\n    kwargs.update({'img': img, 'img_metas': img_metas})\n    kwargs.update({'tag': [meta['tag'] for meta in img_metas]})\n    tags = list(set(kwargs['tag']))\n    data_groups = {tag: select_group(kwargs, tag) for tag in tags}\n    for tag, group in data_groups.items():\n        group.pop('tag')\n    return data_groups\n"
  },
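A minimal sketch of the tag-based regrouping performed by `split_batch` (shapes and tags are made up):

import torch

from mmdet.utils.split_batch import split_batch

img = torch.zeros(4, 3, 32, 32)
img_metas = [dict(tag=t) for t in
             ('sup', 'sup', 'unsup_teacher', 'unsup_student')]
kwargs = dict(gt_bboxes=[torch.rand(2, 4) for _ in range(4)])

groups = split_batch(img, img_metas, kwargs)
# Tensor entries are re-stacked per tag, list entries are filtered.
assert groups['sup']['img'].shape == (2, 3, 32, 32)
assert len(groups['unsup_teacher']['gt_bboxes']) == 1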
  {
    "path": "mmdet/utils/util_distribution.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.parallel import MMDataParallel, MMDistributedDataParallel\n\ndp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel}\n\nddp_factory = {'cuda': MMDistributedDataParallel}\n\n\ndef build_dp(model, device='cuda', dim=0, *args, **kwargs):\n    \"\"\"Build DataParallel module by device type.\n\n    If device is cuda, return an MMDataParallel model; if device is mlu or\n    npu, return the corresponding device-specific DataParallel model.\n\n    Args:\n        model (:class:`nn.Module`): model to be parallelized.\n        device (str): device type, cuda, cpu, mlu or npu. Defaults to cuda.\n        dim (int): Dimension used to scatter the data. Defaults to 0.\n\n    Returns:\n        nn.Module: the model to be parallelized.\n    \"\"\"\n    if device == 'npu':\n        from mmcv.device.npu import NPUDataParallel\n        dp_factory['npu'] = NPUDataParallel\n        torch.npu.set_device(kwargs['device_ids'][0])\n        torch.npu.set_compile_mode(jit_compile=False)\n        model = model.npu()\n    elif device == 'cuda':\n        model = model.cuda(kwargs['device_ids'][0])\n    elif device == 'mlu':\n        from mmcv.device.mlu import MLUDataParallel\n        dp_factory['mlu'] = MLUDataParallel\n        model = model.mlu()\n\n    return dp_factory[device](model, dim=dim, *args, **kwargs)\n\n\ndef build_ddp(model, device='cuda', *args, **kwargs):\n    \"\"\"Build DistributedDataParallel module by device type.\n\n    If device is cuda, return an MMDistributedDataParallel model; if device\n    is mlu or npu, return the corresponding distributed wrapper.\n\n    Args:\n        model (:class:`nn.Module`): module to be parallelized.\n        device (str): device type, cuda, mlu or npu.\n\n    Returns:\n        :class:`nn.Module`: the module to be parallelized\n\n    References:\n        .. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.\n                     DistributedDataParallel.html\n    \"\"\"\n    assert device in ['cuda', 'mlu',\n                      'npu'], 'Only available for cuda, mlu or npu devices.'\n    if device == 'npu':\n        from mmcv.device.npu import NPUDistributedDataParallel\n        torch.npu.set_compile_mode(jit_compile=False)\n        ddp_factory['npu'] = NPUDistributedDataParallel\n        model = model.npu()\n    elif device == 'cuda':\n        model = model.cuda()\n    elif device == 'mlu':\n        from mmcv.device.mlu import MLUDistributedDataParallel\n        ddp_factory['mlu'] = MLUDistributedDataParallel\n        model = model.mlu()\n\n    return ddp_factory[device](model, *args, **kwargs)\n\n\ndef is_npu_available():\n    \"\"\"Returns a bool indicating if NPU is currently available.\"\"\"\n    return hasattr(torch, 'npu') and torch.npu.is_available()\n\n\ndef is_mlu_available():\n    \"\"\"Returns a bool indicating if MLU is currently available.\"\"\"\n    return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()\n\n\ndef get_device():\n    \"\"\"Returns the first available device among npu, cuda and mlu, else cpu.\"\"\"\n    is_device_available = {\n        'npu': is_npu_available(),\n        'cuda': torch.cuda.is_available(),\n        'mlu': is_mlu_available()\n    }\n    device_list = [k for k, v in is_device_available.items() if v]\n    return device_list[0] if len(device_list) >= 1 else 'cpu'\n"
  },
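A short sketch of combining the helpers above in the single-GPU, non-distributed case (`device_ids` is required by the cuda branch of `build_dp`):

import torch.nn as nn

from mmdet.utils.util_distribution import build_dp, get_device

model = nn.Conv2d(3, 8, 3)
device = get_device()  # first available among npu, cuda, mlu, else 'cpu'
if device == 'cuda':
    model = build_dp(model, device=device, device_ids=[0])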
  {
    "path": "mmdet/utils/util_mixins.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"This module defines the :class:`NiceRepr` mixin class, which defines a\n``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``\nmethod, which you must define. This means you only have to overload one\nfunction instead of two.  Furthermore, if the object defines a ``__len__``\nmethod, then the ``__nice__`` method defaults to something sensible, otherwise\nit is treated as abstract and raises ``NotImplementedError``.\n\nTo use simply have your object inherit from :class:`NiceRepr`\n(multi-inheritance should be ok).\n\nThis code was copied from the ubelt library: https://github.com/Erotemic/ubelt\n\nExample:\n    >>> # Objects that define __nice__ have a default __str__ and __repr__\n    >>> class Student(NiceRepr):\n    ...    def __init__(self, name):\n    ...        self.name = name\n    ...    def __nice__(self):\n    ...        return self.name\n    >>> s1 = Student('Alice')\n    >>> s2 = Student('Bob')\n    >>> print(f's1 = {s1}')\n    >>> print(f's2 = {s2}')\n    s1 = <Student(Alice)>\n    s2 = <Student(Bob)>\n\nExample:\n    >>> # Objects that define __len__ have a default __nice__\n    >>> class Group(NiceRepr):\n    ...    def __init__(self, data):\n    ...        self.data = data\n    ...    def __len__(self):\n    ...        return len(self.data)\n    >>> g = Group([1, 2, 3])\n    >>> print(f'g = {g}')\n    g = <Group(3)>\n\"\"\"\nimport warnings\n\n\nclass NiceRepr:\n    \"\"\"Inherit from this class and define ``__nice__`` to \"nicely\" print your\n    objects.\n\n    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function\n    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.\n    If the inheriting class has a ``__len__``, method then the default\n    ``__nice__`` method will return its length.\n\n    Example:\n        >>> class Foo(NiceRepr):\n        ...    def __nice__(self):\n        ...        return 'info'\n        >>> foo = Foo()\n        >>> assert str(foo) == '<Foo(info)>'\n        >>> assert repr(foo).startswith('<Foo(info) at ')\n\n    Example:\n        >>> class Bar(NiceRepr):\n        ...    pass\n        >>> bar = Bar()\n        >>> import pytest\n        >>> with pytest.warns(None) as record:\n        >>>     assert 'object at' in str(bar)\n        >>>     assert 'object at' in repr(bar)\n\n    Example:\n        >>> class Baz(NiceRepr):\n        ...    def __len__(self):\n        ...        
return 5\n        >>> baz = Baz()\n        >>> assert str(baz) == '<Baz(5)>'\n    \"\"\"\n\n    def __nice__(self):\n        \"\"\"str: a \"nice\" summary string describing this module\"\"\"\n        if hasattr(self, '__len__'):\n            # It is a common pattern for objects to use __len__ in __nice__\n            # As a convenience we define a default __nice__ for these objects\n            return str(len(self))\n        else:\n            # In all other cases force the subclass to overload __nice__\n            raise NotImplementedError(\n                f'Define the __nice__ method for {self.__class__!r}')\n\n    def __repr__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            nice = self.__nice__()\n            classname = self.__class__.__name__\n            return f'<{classname}({nice}) at {hex(id(self))}>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n\n    def __str__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            classname = self.__class__.__name__\n            nice = self.__nice__()\n            return f'<{classname}({nice})>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n"
  },
  {
    "path": "mmdet/utils/util_random.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Helpers for random number generators.\"\"\"\nimport numpy as np\n\n\ndef ensure_rng(rng=None):\n    \"\"\"Coerces input into a random number generator.\n\n    If the input is None, then a global random state is returned.\n\n    If the input is a numeric value, then that is used as a seed to construct a\n    random state. Otherwise the input is returned as-is.\n\n    Adapted from [1]_.\n\n    Args:\n        rng (int | numpy.random.RandomState | None):\n            if None, then defaults to the global rng. Otherwise this can be an\n            integer or a RandomState class\n    Returns:\n        (numpy.random.RandomState) : rng -\n            a numpy random number generator\n\n    References:\n        .. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501\n    \"\"\"\n\n    if rng is None:\n        rng = np.random.mtrand._rand\n    elif isinstance(rng, int):\n        rng = np.random.RandomState(rng)\n    else:\n        rng = rng\n    return rng\n"
  },
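A small sketch of the three accepted inputs of `ensure_rng`:

import numpy as np

from mmdet.utils.util_random import ensure_rng

seeded = ensure_rng(42)           # int -> seeded RandomState
passthrough = ensure_rng(seeded)  # RandomState -> returned unchanged
default = ensure_rng(None)        # None -> numpy's global RandomState
assert isinstance(seeded, np.random.RandomState)
assert passthrough is seeded
assert isinstance(default, np.random.RandomState)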
  {
    "path": "mmdet/version.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\n\n__version__ = '2.28.2'\nshort_version = __version__\n\n\ndef parse_version_info(version_str):\n    version_info = []\n    for x in version_str.split('.'):\n        if x.isdigit():\n            version_info.append(int(x))\n        elif x.find('rc') != -1:\n            patch_version = x.split('rc')\n            version_info.append(int(patch_version[0]))\n            version_info.append(f'rc{patch_version[1]}')\n    return tuple(version_info)\n\n\nversion_info = parse_version_info(__version__)\n"
  },
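The version parser turns numeric components into ints and keeps a trailing release-candidate suffix as a string, for example:

from mmdet.version import parse_version_info, version_info

assert parse_version_info('2.28.2') == (2, 28, 2)
assert parse_version_info('2.25.0rc1') == (2, 25, 0, 'rc1')
assert version_info == (2, 28, 2)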
  {
    "path": "projects/configs/_base_/datasets/coco_detection.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True),\n    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', flip_ratio=0.5),\n    dict(type='Normalize', **img_norm_cfg),\n    dict(type='Pad', size_divisor=32),\n    dict(type='DefaultFormatBundle'),\n    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=32),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ])\n]\ndata = dict(\n    samples_per_gpu=2,\n    workers_per_gpu=2,\n    train=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_train2017.json',\n        img_prefix=data_root + 'train2017/',\n        pipeline=train_pipeline),\n    val=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline),\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\nevaluation = dict(interval=1, metric='bbox')\n"
  },
  {
    "path": "projects/configs/_base_/datasets/coco_instance.py",
    "content": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),\n    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', flip_ratio=0.5),\n    dict(type='Normalize', **img_norm_cfg),\n    dict(type='Pad', size_divisor=32),\n    dict(type='DefaultFormatBundle'),\n    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=32),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ])\n]\ndata = dict(\n    samples_per_gpu=2,\n    workers_per_gpu=2,\n    train=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_train2017.json',\n        img_prefix=data_root + 'train2017/',\n        pipeline=train_pipeline),\n    val=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline),\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\nevaluation = dict(metric=['bbox', 'segm'])\n"
  },
  {
    "path": "projects/configs/_base_/datasets/coco_panoptic.py",
    "content": "# dataset settings\ndataset_type = 'CocoPanopticDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\ntrain_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='LoadPanopticAnnotations',\n        with_bbox=True,\n        with_mask=True,\n        with_seg=True),\n    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),\n    dict(type='RandomFlip', flip_ratio=0.5),\n    dict(type='Normalize', **img_norm_cfg),\n    dict(type='Pad', size_divisor=32),\n    dict(type='SegRescale', scale_factor=1 / 4),\n    dict(type='DefaultFormatBundle'),\n    dict(\n        type='Collect',\n        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),\n]\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=32),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img']),\n        ])\n]\ndata = dict(\n    samples_per_gpu=2,\n    workers_per_gpu=2,\n    train=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/panoptic_train2017.json',\n        img_prefix=data_root + 'train2017/',\n        seg_prefix=data_root + 'annotations/panoptic_train2017/',\n        pipeline=train_pipeline),\n    val=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/panoptic_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        seg_prefix=data_root + 'annotations/panoptic_val2017/',\n        pipeline=test_pipeline),\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/panoptic_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        seg_prefix=data_root + 'annotations/panoptic_val2017/',\n        pipeline=test_pipeline))\nevaluation = dict(interval=1, metric=['PQ'])\n"
  },
  {
    "path": "projects/configs/_base_/default_runtime.py",
    "content": "checkpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n    interval=50,\n    hooks=[\n        dict(type='TextLoggerHook'),\n        # dict(type='TensorboardLoggerHook')\n    ])\n# yapf:enable\ncustom_hooks = [dict(type='NumClassCheckHook')]\n\ndist_params = dict(backend='nccl')\nlog_level = 'INFO'\nload_from = None\nresume_from = None\nworkflow = [('train', 1)]\n\n# disable opencv multithreading to avoid system being overloaded\nopencv_num_threads = 0\n# set multi-process start method as `fork` to speed up the training\nmp_start_method = 'fork'\n\n# Default setting for scaling LR automatically\n#   - `enable` means enable scaling LR automatically\n#       or not by default.\n#   - `base_batch_size` = (8 GPUs) x (2 samples per GPU).\nauto_scale_lr = dict(enable=False, base_batch_size=16)\n\n# placeholder\ntotal_epochs = 1\n"
  },
  {
    "path": "projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-b.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='focalnet_dino',\n    det_wrapper_cfg=dict(num_classes=91,\n                         param_dict_type='default',\n                         ddetr_lr_param=False,\n                         onecyclelr=False,\n                         modelname='dino',\n                         frozen_weights=None,\n                         backbone='focalnet_L_384_22k_fl4',\n                         focal_levels=4,\n                         focal_windows=3,\n                         use_checkpoint=False,\n                         dilation=False,\n                         position_embedding='sine',\n                         pe_temperatureH=20,\n                         pe_temperatureW=20,\n                         return_interm_indices=[0, 1, 2, 3],\n                         backbone_freeze_keywords=None,\n                         enc_layers=6,\n                         dec_layers=6,\n                         unic_layers=0,\n                         pre_norm=False,\n                         dim_feedforward=2048,\n                         hidden_dim=256,\n                         dropout=0.0,\n                         nheads=8,\n                         num_queries=900,\n                         query_dim=4,\n                         num_patterns=0,\n                         pdetr3_bbox_embed_diff_each_layer=False,\n                         pdetr3_refHW=-1,\n                         random_refpoints_xy=False,\n                         fix_refpoints_hw=-1,\n                         dabdetr_yolo_like_anchor_update=False,\n                         dabdetr_deformable_encoder=False,\n                         dabdetr_deformable_decoder=False,\n                         use_deformable_box_attn=False,\n                         box_attn_type='roi_align',\n                         dec_layer_number=None,\n                         num_feature_levels=5,\n                         enc_n_points=4,\n                         dec_n_points=4,\n                         decoder_layer_noise=False,\n                         dln_xy_noise=0.2,\n                         dln_hw_noise=0.2,\n                         add_channel_attention=False,\n                         add_pos_value=False,\n                         two_stage_type='standard',\n                         two_stage_pat_embed=0,\n                         two_stage_add_query_num=0,\n                         two_stage_bbox_embed_share=False,\n                         two_stage_class_embed_share=False,\n                         two_stage_learn_wh=False,\n                         two_stage_default_hw=0.05,\n                         two_stage_keep_all_tokens=False,\n                         num_select=300,\n                         transformer_activation='relu',\n                         batch_norm_type='FrozenBatchNorm2d',\n                         masks=False,\n                         aux_loss=True,\n                         set_cost_class=2.0,\n                         set_cost_bbox=5.0,\n                         set_cost_giou=2.0,\n                         no_interm_box_loss=False,\n                         focal_alpha=0.25,\n                         decoder_sa_type='sa',  # ['sa', 'ca_label', 'ca_content']\n                         matcher_type='HungarianMatcher',  # or SimpleMinsumMatcher\n                         
decoder_module_seq=['sa', 'ca', 'ffn'],\n                         nms_iou_threshold=-1,\n                         dec_pred_bbox_embed_share=True,\n                         dec_pred_class_embed_share=True,\n                         use_dn=False,\n                         dn_number=100,\n                         dn_box_noise_scale=0.4,\n                         dn_label_noise_ratio=0.5,\n                         embed_init_tgt=True,\n                         dn_labelbook_size=91,\n                         match_unstable_error=True,\n                         # for ema\n                         use_ema=False,\n                         ema_decay=0.9997,\n                         ema_epoch=0,\n                         use_detached_boxes_dec_out=False),\n    det_model_ckpt='ckpt/focalnet_l_dino.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-h.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='focalnet_dino',\n    det_wrapper_cfg=dict(num_classes=91,\n                   param_dict_type='default',\n                   ddetr_lr_param=False,\n                   onecyclelr=False,\n                   modelname='dino',\n                   frozen_weights=None,\n                   backbone='focalnet_L_384_22k_fl4',\n                   focal_levels=4,\n                   focal_windows=3,\n                   use_checkpoint=False,\n                   dilation=False,\n                   position_embedding='sine',\n                   pe_temperatureH=20,\n                   pe_temperatureW=20,\n                   return_interm_indices=[0, 1, 2, 3],\n                   backbone_freeze_keywords=None,\n                   enc_layers=6,\n                   dec_layers=6,\n                   unic_layers=0,\n                   pre_norm=False,\n                   dim_feedforward=2048,\n                   hidden_dim=256,\n                   dropout=0.0,\n                   nheads=8,\n                   num_queries=900,\n                   query_dim=4,\n                   num_patterns=0,\n                   pdetr3_bbox_embed_diff_each_layer=False,\n                   pdetr3_refHW=-1,\n                   random_refpoints_xy=False,\n                   fix_refpoints_hw=-1,\n                   dabdetr_yolo_like_anchor_update=False,\n                   dabdetr_deformable_encoder=False,\n                   dabdetr_deformable_decoder=False,\n                   use_deformable_box_attn=False,\n                   box_attn_type='roi_align',\n                   dec_layer_number=None,\n                   num_feature_levels=5,\n                   enc_n_points=4,\n                   dec_n_points=4,\n                   decoder_layer_noise=False,\n                   dln_xy_noise=0.2,\n                   dln_hw_noise=0.2,\n                   add_channel_attention=False,\n                   add_pos_value=False,\n                   two_stage_type='standard',\n                   two_stage_pat_embed=0,\n                   two_stage_add_query_num=0,\n                   two_stage_bbox_embed_share=False,\n                   two_stage_class_embed_share=False,\n                   two_stage_learn_wh=False,\n                   two_stage_default_hw=0.05,\n                   two_stage_keep_all_tokens=False,\n                   num_select=300,\n                   transformer_activation='relu',\n                   batch_norm_type='FrozenBatchNorm2d',\n                   masks=False,\n                   aux_loss=True,\n                   set_cost_class=2.0,\n                   set_cost_bbox=5.0,\n                   set_cost_giou=2.0,\n                   no_interm_box_loss=False,\n                   focal_alpha=0.25,\n                   decoder_sa_type='sa',  # ['sa', 'ca_label', 'ca_content']\n                   matcher_type='HungarianMatcher',  # or SimpleMinsumMatcher\n                   decoder_module_seq=['sa', 'ca', 'ffn'],\n                   nms_iou_threshold=-1,\n                   dec_pred_bbox_embed_share=True,\n                   dec_pred_class_embed_share=True,\n                   use_dn=False,\n                   dn_number=100,\n                   dn_box_noise_scale=0.4,\n                   dn_label_noise_ratio=0.5,\n                   embed_init_tgt=True,\n                   
dn_labelbook_size=91,\n                   match_unstable_error=True,\n                   # for ema\n                   use_ema=False,\n                   ema_decay=0.9997,\n                   ema_epoch=0,\n                   use_detached_boxes_dec_out=False),\n    det_model_ckpt='ckpt/focalnet_l_dino.pth',\n    num_classes=80,\n    model_type='vit_h',\n    sam_checkpoint='ckpt/sam_vit_h_4b8939.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-h_best-in-multi_cascade.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAMCascade',\n    det_wrapper_type='focalnet_dino',\n    det_wrapper_cfg=dict(num_classes=91,\n                   param_dict_type='default',\n                   ddetr_lr_param=False,\n                   onecyclelr=False,\n                   modelname='dino',\n                   frozen_weights=None,\n                   backbone='focalnet_L_384_22k_fl4',\n                   focal_levels=4,\n                   focal_windows=3,\n                   use_checkpoint=False,\n                   dilation=False,\n                   position_embedding='sine',\n                   pe_temperatureH=20,\n                   pe_temperatureW=20,\n                   return_interm_indices=[0, 1, 2, 3],\n                   backbone_freeze_keywords=None,\n                   enc_layers=6,\n                   dec_layers=6,\n                   unic_layers=0,\n                   pre_norm=False,\n                   dim_feedforward=2048,\n                   hidden_dim=256,\n                   dropout=0.0,\n                   nheads=8,\n                   num_queries=900,\n                   query_dim=4,\n                   num_patterns=0,\n                   pdetr3_bbox_embed_diff_each_layer=False,\n                   pdetr3_refHW=-1,\n                   random_refpoints_xy=False,\n                   fix_refpoints_hw=-1,\n                   dabdetr_yolo_like_anchor_update=False,\n                   dabdetr_deformable_encoder=False,\n                   dabdetr_deformable_decoder=False,\n                   use_deformable_box_attn=False,\n                   box_attn_type='roi_align',\n                   dec_layer_number=None,\n                   num_feature_levels=5,\n                   enc_n_points=4,\n                   dec_n_points=4,\n                   decoder_layer_noise=False,\n                   dln_xy_noise=0.2,\n                   dln_hw_noise=0.2,\n                   add_channel_attention=False,\n                   add_pos_value=False,\n                   two_stage_type='standard',\n                   two_stage_pat_embed=0,\n                   two_stage_add_query_num=0,\n                   two_stage_bbox_embed_share=False,\n                   two_stage_class_embed_share=False,\n                   two_stage_learn_wh=False,\n                   two_stage_default_hw=0.05,\n                   two_stage_keep_all_tokens=False,\n                   num_select=300,\n                   transformer_activation='relu',\n                   batch_norm_type='FrozenBatchNorm2d',\n                   masks=False,\n                   aux_loss=True,\n                   set_cost_class=2.0,\n                   set_cost_bbox=5.0,\n                   set_cost_giou=2.0,\n                   no_interm_box_loss=False,\n                   focal_alpha=0.25,\n                   decoder_sa_type='sa',  # ['sa', 'ca_label', 'ca_content']\n                   matcher_type='HungarianMatcher',  # or SimpleMinsumMatcher\n                   decoder_module_seq=['sa', 'ca', 'ffn'],\n                   nms_iou_threshold=-1,\n                   dec_pred_bbox_embed_share=True,\n                   dec_pred_class_embed_share=True,\n                   use_dn=False,\n                   dn_number=100,\n                   dn_box_noise_scale=0.4,\n                   dn_label_noise_ratio=0.5,\n                   embed_init_tgt=True,\n                 
  dn_labelbook_size=91,\n                   match_unstable_error=True,\n                   # for ema\n                   use_ema=False,\n                   ema_decay=0.9997,\n                   ema_epoch=0,\n                   use_detached_boxes_dec_out=False),\n    det_model_ckpt='ckpt/focalnet_l_dino.pth',\n    num_classes=80,\n    model_type='vit_h',\n    sam_checkpoint='ckpt/sam_vit_h_4b8939.pth',\n    use_sam_iou=True,\n    best_in_multi_mask=True,\n    stage_1_multi_mask=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-l.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='focalnet_dino',\n    det_wrapper_cfg=dict(num_classes=91,\n                   param_dict_type='default',\n                   ddetr_lr_param=False,\n                   onecyclelr=False,\n                   modelname='dino',\n                   frozen_weights=None,\n                   backbone='focalnet_L_384_22k_fl4',\n                   focal_levels=4,\n                   focal_windows=3,\n                   use_checkpoint=False,\n                   dilation=False,\n                   position_embedding='sine',\n                   pe_temperatureH=20,\n                   pe_temperatureW=20,\n                   return_interm_indices=[0, 1, 2, 3],\n                   backbone_freeze_keywords=None,\n                   enc_layers=6,\n                   dec_layers=6,\n                   unic_layers=0,\n                   pre_norm=False,\n                   dim_feedforward=2048,\n                   hidden_dim=256,\n                   dropout=0.0,\n                   nheads=8,\n                   num_queries=900,\n                   query_dim=4,\n                   num_patterns=0,\n                   pdetr3_bbox_embed_diff_each_layer=False,\n                   pdetr3_refHW=-1,\n                   random_refpoints_xy=False,\n                   fix_refpoints_hw=-1,\n                   dabdetr_yolo_like_anchor_update=False,\n                   dabdetr_deformable_encoder=False,\n                   dabdetr_deformable_decoder=False,\n                   use_deformable_box_attn=False,\n                   box_attn_type='roi_align',\n                   dec_layer_number=None,\n                   num_feature_levels=5,\n                   enc_n_points=4,\n                   dec_n_points=4,\n                   decoder_layer_noise=False,\n                   dln_xy_noise=0.2,\n                   dln_hw_noise=0.2,\n                   add_channel_attention=False,\n                   add_pos_value=False,\n                   two_stage_type='standard',\n                   two_stage_pat_embed=0,\n                   two_stage_add_query_num=0,\n                   two_stage_bbox_embed_share=False,\n                   two_stage_class_embed_share=False,\n                   two_stage_learn_wh=False,\n                   two_stage_default_hw=0.05,\n                   two_stage_keep_all_tokens=False,\n                   num_select=300,\n                   transformer_activation='relu',\n                   batch_norm_type='FrozenBatchNorm2d',\n                   masks=False,\n                   aux_loss=True,\n                   set_cost_class=2.0,\n                   set_cost_bbox=5.0,\n                   set_cost_giou=2.0,\n                   no_interm_box_loss=False,\n                   focal_alpha=0.25,\n                   decoder_sa_type='sa',  # ['sa', 'ca_label', 'ca_content']\n                   matcher_type='HungarianMatcher',  # or SimpleMinsumMatcher\n                   decoder_module_seq=['sa', 'ca', 'ffn'],\n                   nms_iou_threshold=-1,\n                   dec_pred_bbox_embed_share=True,\n                   dec_pred_class_embed_share=True,\n                   use_dn=False,\n                   dn_number=100,\n                   dn_box_noise_scale=0.4,\n                   dn_label_noise_ratio=0.5,\n                   embed_init_tgt=True,\n                   
dn_labelbook_size=91,\n                   match_unstable_error=True,\n                   # for ema\n                   use_ema=False,\n                   ema_decay=0.9997,\n                   ema_epoch=0,\n                   use_detached_boxes_dec_out=False),\n    det_model_ckpt='ckpt/focalnet_l_dino.pth',\n    num_classes=80,\n    model_type='vit_l',\n    sam_checkpoint='ckpt/sam_vit_l_0b3195.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/r50-hdetr_sam-vit-b.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='resnet50',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/r50_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='resnet50',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/r50_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n    best_in_multi_mask=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi_cascade.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAMCascade',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='resnet50',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/r50_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n    best_in_multi_mask=True,\n    stage_1_multi_mask=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/r50-hdetr_sam-vit-b_cascade.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAMCascade',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='resnet50',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/r50_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n    best_in_multi_mask=False,\n    stage_1_multi_mask=False,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/r50-hdetr_sam-vit-l.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='resnet50',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/r50_hdetr.pth',\n    num_classes=80,\n    model_type='vit_l',\n    sam_checkpoint='ckpt/sam_vit_l_0b3195.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=False,\n                         backbone='swin_large',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.5,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=900,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=300,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         use_wandb=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_l_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=False,\n                         backbone='swin_large',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.5,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=900,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=300,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         use_wandb=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_l_hdetr.pth',\n    num_classes=80,\n    model_type='vit_h',\n    sam_checkpoint='ckpt/sam_vit_h_4b8939.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAMCascade',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=False,\n                         backbone='swin_large',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.5,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=900,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=300,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         use_wandb=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_l_hdetr.pth',\n    num_classes=80,\n    model_type='vit_h',\n    sam_checkpoint='ckpt/sam_vit_h_4b8939.pth',\n    use_sam_iou=True,\n    best_in_multi_mask=True,\n    stage_1_multi_mask=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=False,\n                         backbone='swin_large',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.5,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=900,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=300,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         use_wandb=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_l_hdetr.pth',\n    num_classes=80,\n    model_type='vit_l',\n    sam_checkpoint='ckpt/sam_vit_l_0b3195.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='swin_tiny',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_t_hdetr.pth',\n    num_classes=80,\n    model_type='vit_b',\n    sam_checkpoint='ckpt/sam_vit_b_01ec64.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/configs/hdetr/swin-t-hdetr_sam-vit-l.py",
    "content": "_base_ = [\n    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'\n]\n\nplugin = True\nplugin_dir = 'projects/instance_segment_anything/'\n\nmodel = dict(\n    type='DetWrapperInstanceSAM',\n    det_wrapper_type='hdetr',\n    det_wrapper_cfg=dict(aux_loss=True,\n                         backbone='swin_tiny',\n                         num_classes=91,\n                         cache_mode=False,\n                         dec_layers=6,\n                         dec_n_points=4,\n                         dilation=False,\n                         dim_feedforward=2048,\n                         drop_path_rate=0.2,\n                         dropout=0.0,\n                         enc_layers=6,\n                         enc_n_points=4,\n                         focal_alpha=0.25,\n                         frozen_weights=None,\n                         hidden_dim=256,\n                         k_one2many=6,\n                         lambda_one2many=1.0,\n                         look_forward_twice=True,\n                         masks=False,\n                         mixed_selection=True,\n                         nheads=8,\n                         num_feature_levels=4,\n                         num_queries_one2many=1500,\n                         num_queries_one2one=300,\n                         position_embedding='sine',\n                         position_embedding_scale=6.283185307179586,\n                         remove_difficult=False,\n                         topk=100,\n                         two_stage=True,\n                         use_checkpoint=False,\n                         use_fp16=False,\n                         with_box_refine=True),\n    det_model_ckpt='ckpt/swin_t_hdetr.pth',\n    num_classes=80,\n    model_type='vit_l',\n    sam_checkpoint='ckpt/sam_vit_l_0b3195.pth',\n    use_sam_iou=True,\n)\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)\n# test_pipeline, NOTE the Pad's size_divisor is different from the default\n# setting (size_divisor=32). While there is little effect on the performance\n# whether we use the default setting or use size_divisor=1.\n\ntest_pipeline = [\n    dict(type='LoadImageFromFile'),\n    dict(\n        type='MultiScaleFlipAug',\n        img_scale=(1333, 800),\n        flip=False,\n        transforms=[\n            dict(type='Resize', keep_ratio=True),\n            dict(type='RandomFlip'),\n            dict(type='Normalize', **img_norm_cfg),\n            dict(type='Pad', size_divisor=1),\n            dict(type='ImageToTensor', keys=['img']),\n            dict(type='Collect', keys=['img'])\n        ])\n]\n\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\n\ndata = dict(\n    samples_per_gpu=1,\n    workers_per_gpu=1,\n    test=dict(\n        type=dataset_type,\n        ann_file=data_root + 'annotations/instances_val2017.json',\n        img_prefix=data_root + 'val2017/',\n        pipeline=test_pipeline))\n"
  },
  {
    "path": "projects/instance_segment_anything/__init__.py",
    "content": "from .models.det_wrapper_instance_sam import DetWrapperInstanceSAM\nfrom .models.det_wrapper_instance_sam_cascade import DetWrapperInstanceSAMCascade"
  },
  {
    "path": "projects/instance_segment_anything/models/det_wrapper_instance_sam.py",
    "content": "import cv2\nimport torch\nimport torch.nn as nn\nfrom mmcv import Config\nfrom mmcv.runner import load_checkpoint\n\nfrom mmdet.core import bbox2result\nfrom mmdet.models import DETECTORS, BaseDetector\nfrom projects.instance_segment_anything.models.segment_anything import sam_model_registry, SamPredictor\nfrom .focalnet_dino.focalnet_dino_wrapper import FocalNetDINOWrapper\nfrom .hdetr.hdetr_wrapper import HDetrWrapper\n\n\n@DETECTORS.register_module()\nclass DetWrapperInstanceSAM(BaseDetector):\n    wrapper_dict = {'hdetr': HDetrWrapper,\n                    'focalnet_dino': FocalNetDINOWrapper}\n\n    def __init__(self,\n                 det_wrapper_type='hdetr',\n                 det_wrapper_cfg=None,\n                 det_model_ckpt=None,\n                 num_classes=80,\n\n                 model_type='vit_b',\n                 sam_checkpoint=None,\n                 use_sam_iou=True,\n                 best_in_multi_mask=False,\n\n                 init_cfg=None,\n                 train_cfg=None,\n                 test_cfg=None):\n        super(DetWrapperInstanceSAM, self).__init__(init_cfg)\n        self.learnable_placeholder = nn.Embedding(1, 1)\n        det_wrapper_cfg = Config(det_wrapper_cfg)\n        assert det_wrapper_type in self.wrapper_dict.keys()\n        self.det_model = self.wrapper_dict[det_wrapper_type](args=det_wrapper_cfg)\n        if det_model_ckpt is not None:\n            load_checkpoint(self.det_model.model,\n                            filename=det_model_ckpt,\n                            map_location='cpu')\n\n        self.num_classes = num_classes\n\n        # Segment Anything\n        sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\n        _ = sam.to(device=self.learnable_placeholder.weight.device)\n        self.predictor = SamPredictor(sam)\n        # Whether use SAM's predicted IoU to calibrate the confidence score.\n        self.use_sam_iou = use_sam_iou\n        # If True, set multimask_output=True and return the mask with highest predicted IoU.\n        # if False, set multimask_output=False and return the unique output mask.\n        self.best_in_multi_mask = best_in_multi_mask\n\n    def init_weights(self):\n        pass\n\n    def simple_test(self, img, img_metas, rescale=True, ori_img=None):\n        \"\"\"Test without augmentation.\n        Args:\n            imgs (Tensor): A batch of images.\n            img_metas (list[dict]): List of image information.\n        \"\"\"\n        assert rescale\n        assert len(img_metas) == 1\n        # results: List[dict(scores, labels, boxes)]\n        results = self.det_model.simple_test(img,\n                                             img_metas,\n                                             rescale)\n\n        # Tensor(n,4), xyxy, ori image scale\n        output_boxes = results[0]['boxes']\n\n        if ori_img is None:\n            image_path = img_metas[0]['filename']\n            ori_img = cv2.imread(image_path)\n            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)\n        self.predictor.set_image(ori_img)\n\n        transformed_boxes = self.predictor.transform.apply_boxes_torch(output_boxes, ori_img.shape[:2])\n\n        # mask_pred: n,1/3,h,w\n        # sam_score: n, 1/3\n        mask_pred, sam_score, _ = self.predictor.predict_torch(\n            point_coords=None,\n            point_labels=None,\n            boxes=transformed_boxes,\n            multimask_output=self.best_in_multi_mask,\n            return_logits=True,\n        )\n        if 
self.best_in_multi_mask:\n            # sam_score: n\n            sam_score, max_iou_idx = torch.max(sam_score, dim=1)\n            # mask_pred: n,h,w\n            mask_pred = mask_pred[torch.arange(mask_pred.size(0)),\n                                  max_iou_idx]\n        else:\n            # Tensor(n,h,w), raw mask pred\n            # n,1,h,w->n,h,w\n            mask_pred = mask_pred.squeeze(1)\n            # n,1->n\n            sam_score = sam_score.squeeze(-1)\n\n        # Tensor(n,)\n        label_pred = results[0]['labels']\n\n        score_pred = results[0]['scores']\n\n        # mask_pred: Tensor(n,h,w)\n        # label_pred: Tensor(n,)\n        # score_pred: Tensor(n,)\n        # sam_score: Tensor(n,)\n        mask_pred_binary = (mask_pred > self.predictor.model.mask_threshold).float()\n        if self.use_sam_iou:\n            det_scores = score_pred * sam_score\n        else:\n            # n\n            mask_scores_per_image = (mask_pred * mask_pred_binary).flatten(1).sum(1) / (\n                    mask_pred_binary.flatten(1).sum(1) + 1e-6)\n            det_scores = score_pred * mask_scores_per_image\n        # det_scores = score_pred\n        mask_pred_binary = mask_pred_binary.bool()\n        bboxes = torch.cat([output_boxes, det_scores[:, None]], dim=-1)\n        bbox_results = bbox2result(bboxes, label_pred, self.num_classes)\n        mask_results = [[] for _ in range(self.num_classes)]\n        for j, label in enumerate(label_pred):\n            mask = mask_pred_binary[j].detach().cpu().numpy()\n            mask_results[label].append(mask)\n        output_results = [(bbox_results, mask_results)]\n\n        return output_results\n\n    # not implemented:\n    def aug_test(self, imgs, img_metas, **kwargs):\n        raise NotImplementedError\n\n    def onnx_export(self, img, img_metas):\n        raise NotImplementedError\n\n    async def async_simple_test(self, img, img_metas, **kwargs):\n        raise NotImplementedError\n\n    def forward_train(self, imgs, img_metas, **kwargs):\n        raise NotImplementedError\n\n    def extract_feat(self, imgs):\n        raise NotImplementedError\n"
  },
  {
    "path": "projects/instance_segment_anything/models/det_wrapper_instance_sam_cascade.py",
    "content": "import cv2\nimport torch\n\nfrom mmdet.core import bbox2result\nfrom mmdet.models import DETECTORS\nfrom .det_wrapper_instance_sam import DetWrapperInstanceSAM\n\n\n@DETECTORS.register_module()\nclass DetWrapperInstanceSAMCascade(DetWrapperInstanceSAM):\n    def __init__(self,\n                 stage_1_multi_mask=False,\n\n                 det_wrapper_type='hdetr',\n                 det_wrapper_cfg=None,\n                 det_model_ckpt=None,\n                 num_classes=80,\n                 model_type='vit_b',\n                 sam_checkpoint=None,\n                 use_sam_iou=True,\n                 best_in_multi_mask=False,\n                 init_cfg=None,\n                 train_cfg=None,\n                 test_cfg=None):\n        super(DetWrapperInstanceSAMCascade, self).__init__(det_wrapper_type=det_wrapper_type,\n                                                           det_wrapper_cfg=det_wrapper_cfg,\n                                                           det_model_ckpt=det_model_ckpt,\n                                                           num_classes=num_classes,\n                                                           model_type=model_type,\n                                                           sam_checkpoint=sam_checkpoint,\n                                                           use_sam_iou=use_sam_iou,\n                                                           best_in_multi_mask=best_in_multi_mask,\n                                                           init_cfg=init_cfg,\n                                                           train_cfg=train_cfg,\n                                                           test_cfg=test_cfg)\n        # If True, then the coarse mask output by stage 1 will be the\n        # one with the highest predicted IoU among the three masks.\n        # If False, then stage 1 will only output one coarse mask.\n        self.stage_1_multi_mask = stage_1_multi_mask\n\n    def simple_test(self, img, img_metas, rescale=True, ori_img=None):\n        \"\"\"Test without augmentation.\n        Args:\n            imgs (Tensor): A batch of images.\n            img_metas (list[dict]): List of image information.\n        \"\"\"\n        assert rescale\n        assert len(img_metas) == 1\n        # results: List[dict(scores, labels, boxes)]\n        results = self.det_model.simple_test(img,\n                                             img_metas,\n                                             rescale)\n\n        # Tensor(n,4), xyxy, ori image scale\n        output_boxes = results[0]['boxes']\n\n        if ori_img is None:\n            image_path = img_metas[0]['filename']\n            ori_img = cv2.imread(image_path)\n            ori_img = cv2.cvtColor(ori_img, cv2.COLOR_BGR2RGB)\n        self.predictor.set_image(ori_img)\n\n        transformed_boxes = self.predictor.transform.apply_boxes_torch(output_boxes, ori_img.shape[:2])\n\n        # mask_pred: n,1/3,h,w\n        # sam_score: n, 1/3\n        # coarse_mask: n,1/3,256,256\n        _1, coarse_mask_score, coarse_mask = self.predictor.predict_torch(\n            point_coords=None,\n            point_labels=None,\n            boxes=transformed_boxes,\n            multimask_output=self.stage_1_multi_mask,\n            return_logits=True,\n        )\n        if self.stage_1_multi_mask:\n            max_iou_idx = torch.max(coarse_mask_score, dim=1)[1]\n            coarse_mask = (coarse_mask[torch.arange(coarse_mask.size(0)),\n                                       
max_iou_idx]).unsqueeze(1)\n        mask_pred, sam_score, _ = self.predictor.predict_torch(\n            point_coords=None,\n            point_labels=None,\n            boxes=transformed_boxes,\n            mask_input=coarse_mask,\n            multimask_output=self.best_in_multi_mask,\n            return_logits=True,\n        )\n        if self.best_in_multi_mask:\n            # sam_score: n\n            sam_score, max_iou_idx = torch.max(sam_score, dim=1)\n            # mask_pred: n,h,w\n            mask_pred = mask_pred[torch.arange(mask_pred.size(0)),\n                                  max_iou_idx]\n        else:\n            # Tensor(n,h,w), raw mask pred\n            # n,1,h,w->n,h,w\n            mask_pred = mask_pred.squeeze(1)\n            # n,1->n\n            sam_score = sam_score.squeeze(-1)\n\n        # Tensor(n,)\n        label_pred = results[0]['labels']\n\n        score_pred = results[0]['scores']\n\n        # mask_pred: Tensor(n,h,w)\n        # label_pred: Tensor(n,)\n        # score_pred: Tensor(n,)\n        # sam_score: Tensor(n,)\n        mask_pred_binary = (mask_pred > self.predictor.model.mask_threshold).float()\n        if self.use_sam_iou:\n            det_scores = score_pred * sam_score\n        else:\n            # n\n            mask_scores_per_image = (mask_pred * mask_pred_binary).flatten(1).sum(1) / (\n                    mask_pred_binary.flatten(1).sum(1) + 1e-6)\n            det_scores = score_pred * mask_scores_per_image\n        # det_scores = score_pred\n        mask_pred_binary = mask_pred_binary.bool()\n        bboxes = torch.cat([output_boxes, det_scores[:, None]], dim=-1)\n        bbox_results = bbox2result(bboxes, label_pred, self.num_classes)\n        mask_results = [[] for _ in range(self.num_classes)]\n        for j, label in enumerate(label_pred):\n            mask = mask_pred_binary[j].detach().cpu().numpy()\n            mask_results[label].append(mask)\n        output_results = [(bbox_results, mask_results)]\n\n        return output_results\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/focalnet_dino_wrapper.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom mmcv.runner import BaseModule\n\nfrom .models import build_model\nfrom .models.dino.util.misc import NestedTensor, inverse_sigmoid\n\n\nclass FocalNetDINOWrapper(BaseModule):\n    def __init__(self,\n                 args=None,\n                 init_cfg=None):\n        super(FocalNetDINOWrapper, self).__init__(init_cfg)\n        model, _, box_postprocessor = build_model(args)\n        self.model = model\n        self.box_postprocessor = box_postprocessor\n\n        self.cls_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28,\n                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54,\n                          55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,\n                          82, 84, 85, 86, 87, 88, 89, 90]\n\n    def forward(self,\n                img,\n                img_metas):\n        \"\"\"Forward function for training mode.\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n        \"\"\"\n        input_img_h, input_img_w = img_metas[0][\"batch_input_shape\"]\n        batch_size = img.size(0)\n        img_masks = img.new_ones((batch_size, input_img_h, input_img_w),\n                                 dtype=torch.bool)\n        for img_id in range(batch_size):\n            img_h, img_w, _ = img_metas[img_id][\"img_shape\"]\n            img_masks[img_id, :img_h, :img_w] = False\n        samples = NestedTensor(tensors=img, mask=img_masks)\n        features, poss = self.model.backbone(samples)\n\n        srcs = []\n        masks = []\n        for l, feat in enumerate(features):\n            src, mask = feat.decompose()\n            srcs.append(self.model.input_proj[l](src))\n            masks.append(mask)\n            assert mask is not None\n        if self.model.num_feature_levels > len(srcs):\n            _len_srcs = len(srcs)\n            for l in range(_len_srcs, self.model.num_feature_levels):\n                if l == _len_srcs:\n                    src = self.model.input_proj[l](features[-1].tensors)\n                else:\n                    src = self.model.input_proj[l](srcs[-1])\n                m = samples.mask\n                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]\n                pos_l = self.model.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n                srcs.append(src)\n                masks.append(mask)\n                poss.append(pos_l)\n\n        input_query_bbox = input_query_label = attn_mask = dn_meta = None\n\n        hs, reference, hs_enc, ref_enc, init_box_proposal = self.model.transformer(srcs, masks,\n                                                                                   input_query_bbox, poss,\n                                                                                   input_query_label,\n                                                                                   attn_mask)\n        # In case num object=0\n        hs[0] += self.model.label_enc.weight[0, 0] * 0.0\n\n        # deformable-detr-like anchor update\n        # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4\n        outputs_coord_list = 
[]\n        for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1],\n                                                                                  self.model.bbox_embed,\n                                                                                  hs)):\n            layer_delta_unsig = layer_bbox_embed(layer_hs)\n            layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)\n            layer_outputs_unsig = layer_outputs_unsig.sigmoid()\n            outputs_coord_list.append(layer_outputs_unsig)\n        outputs_coord_list = torch.stack(outputs_coord_list)\n\n        outputs_class = torch.stack([layer_cls_embed(layer_hs) for\n                                     layer_cls_embed, layer_hs in zip(self.model.class_embed,\n                                                                      hs)])\n        sampled_logits = outputs_class[-1][:, :, self.cls_index]\n        out = {'pred_logits': sampled_logits, 'pred_boxes': outputs_coord_list[-1]}\n\n        return out\n\n    def simple_test(self, img, img_metas, rescale=False):\n        # out: dict\n        out = self(img, img_metas)\n        if rescale:\n            ori_target_sizes = [meta_info['ori_shape'][:2] for meta_info in img_metas]\n        else:\n            ori_target_sizes = [meta_info['img_shape'][:2] for meta_info in img_metas]\n        ori_target_sizes = (out['pred_logits']).new_tensor(ori_target_sizes, dtype=torch.int64)\n        # results: List[dict(scores, labels, boxes)]\n        results = self.box_postprocessor(out, ori_target_sizes)\n\n        return results\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/__init__.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom .dino import build_dino\n\ndef build_model(args):\n    return build_dino(args)\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/__init__.py",
    "content": "# ------------------------------------------------------------------------\n# Conditional DETR\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Copied from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n\nfrom .dino import build_dino\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/attention.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from codes in torch.nn\n# ------------------------------------------------------------------------\n\n\"\"\"\nMultiheadAttention that support query, key, and value to have different dimensions.\nQuery, key, and value projections are removed.\n\nMostly copy-paste from https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/activation.py#L873\nand https://github.com/pytorch/pytorch/blob/master/torch/nn/functional.py#L4837\n\"\"\"\n\nimport copy\nfrom typing import Optional, List\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\n\nimport warnings\nfrom typing import Tuple, Optional\n\nimport torch\nfrom torch import Tensor\nfrom torch.nn.modules.linear import Linear\nfrom torch.nn.init import xavier_uniform_\nfrom torch.nn.init import constant_\nfrom torch.nn.init import xavier_normal_\nfrom torch.nn.parameter import Parameter\nfrom torch.nn.modules.module import Module\nfrom torch.nn import functional as F\n\nimport warnings\nimport math\n\nfrom torch._C import _infer_size, _add_docstr\nfrom torch.nn import _reduction as _Reduction\nfrom torch.nn.modules import utils\nfrom torch.nn.modules.utils import _single, _pair, _triple, _list_with_default\nfrom torch.nn import grad\nfrom torch import _VF\nfrom torch._jit_internal import boolean_dispatch, List, Optional, _overload, Tuple\ntry:\n    from torch.overrides import has_torch_function, handle_torch_function\nexcept:\n    from torch._overrides import has_torch_function, handle_torch_function\nTensor = torch.Tensor\n\nfrom torch.nn.functional import linear, pad, softmax, dropout\n\nclass MultiheadAttention(Module):\n    r\"\"\"Allows the model to jointly attend to information\n    from different representation subspaces.\n    See reference: Attention Is All You Need\n    .. math::\n        \\text{MultiHead}(Q, K, V) = \\text{Concat}(head_1,\\dots,head_h)W^O\n        \\text{where} head_i = \\text{Attention}(QW_i^Q, KW_i^K, VW_i^V)\n    Args:\n        embed_dim: total dimension of the model.\n        num_heads: parallel attention heads.\n        dropout: a Dropout layer on attn_output_weights. Default: 0.0.\n        bias: add bias as module parameter. Default: True.\n        add_bias_kv: add bias to the key and value sequences at dim=0.\n        add_zero_attn: add a new batch of zeros to the key and\n                       value sequences at dim=1.\n        kdim: total number of features in key. Default: None.\n        vdim: total number of features in value. 
Default: None.\n        Note: if kdim and vdim are None, they will be set to embed_dim such that\n        query, key, and value have the same number of features.\n    Examples::\n        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)\n        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)\n    \"\"\"\n    bias_k: Optional[torch.Tensor]\n    bias_v: Optional[torch.Tensor]\n\n    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):\n        super(MultiheadAttention, self).__init__()\n        self.embed_dim = embed_dim\n        self.kdim = kdim if kdim is not None else embed_dim\n        self.vdim = vdim if vdim is not None else embed_dim\n        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n        self.num_heads = num_heads\n        self.dropout = dropout\n        self.head_dim = embed_dim // num_heads\n        assert self.head_dim * num_heads == self.embed_dim, \"embed_dim must be divisible by num_heads\"\n\n        vdim = vdim if vdim is not None else embed_dim\n        self.out_proj = Linear(vdim , vdim)\n\n        self.in_proj_bias = None\n        self.in_proj_weight = None\n        self.bias_k = self.bias_v = None\n        self.q_proj_weight = None\n        self.k_proj_weight = None\n        self.v_proj_weight = None\n\n        self.add_zero_attn = add_zero_attn\n\n        self._reset_parameters()\n\n    def _reset_parameters(self):\n        constant_(self.out_proj.bias, 0.)\n\n    def __setstate__(self, state):\n        # Support loading old MultiheadAttention checkpoints generated by v1.1.0\n        if '_qkv_same_embed_dim' not in state:\n            state['_qkv_same_embed_dim'] = True\n\n        super(MultiheadAttention, self).__setstate__(state)\n\n    def forward(self, query, key, value, key_padding_mask=None,\n                need_weights=True, attn_mask=None):\n        # type: (Tensor, Tensor, Tensor, Optional[Tensor], bool, Optional[Tensor]) -> Tuple[Tensor, Optional[Tensor]]\n        r\"\"\"\n    Args:\n        query, key, value: map a query and a set of key-value pairs to an output.\n            See \"Attention Is All You Need\" for more details.\n        key_padding_mask: if provided, specified padding elements in the key will\n            be ignored by the attention. When given a binary mask and a value is True,\n            the corresponding value on the attention layer will be ignored. When given\n            a byte mask and a value is non-zero, the corresponding value on the attention\n            layer will be ignored\n        need_weights: output attn_output_weights.\n        attn_mask: 2D or 3D mask that prevents attention to certain positions. 
A 2D mask will be broadcasted for all\n            the batches while a 3D mask allows to specify a different mask for the entries of each batch.\n    Shape:\n        - Inputs:\n        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is\n          the embedding dimension.\n        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is\n          the embedding dimension.\n        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is\n          the embedding dimension.\n        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.\n          If a ByteTensor is provided, the non-zero positions will be ignored while the position\n          with the zero positions will be unchanged. If a BoolTensor is provided, the positions with the\n          value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.\n        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.\n          3D mask :math:`(N*\\text{num_heads}, L, S)` where N is the batch size, L is the target sequence length,\n          S is the source sequence length. attn_mask ensure that position i is allowed to attend the unmasked\n          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend\n          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``\n          is not allowed to attend while ``False`` values will be unchanged. If a FloatTensor\n          is provided, it will be added to the attention weight.\n        - Outputs:\n        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,\n          E is the embedding dimension.\n        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,\n          L is the target sequence length, S is the source sequence length.\n        \"\"\"\n        if not self._qkv_same_embed_dim:\n            return multi_head_attention_forward(\n                query, key, value, self.embed_dim, self.num_heads,\n                self.in_proj_weight, self.in_proj_bias,\n                self.bias_k, self.bias_v, self.add_zero_attn,\n                self.dropout, self.out_proj.weight, self.out_proj.bias,\n                training=self.training,\n                key_padding_mask=key_padding_mask, need_weights=need_weights,\n                attn_mask=attn_mask, use_separate_proj_weight=True,\n                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,\n                v_proj_weight=self.v_proj_weight, out_dim=self.vdim)\n        else:\n            return multi_head_attention_forward(\n                query, key, value, self.embed_dim, self.num_heads,\n                self.in_proj_weight, self.in_proj_bias,\n                self.bias_k, self.bias_v, self.add_zero_attn,\n                self.dropout, self.out_proj.weight, self.out_proj.bias,\n                training=self.training,\n                key_padding_mask=key_padding_mask, need_weights=need_weights,\n                attn_mask=attn_mask, out_dim=self.vdim)\n\n\ndef multi_head_attention_forward(query: Tensor,\n                                 key: Tensor,\n                                 value: Tensor,\n                                 embed_dim_to_check: int,\n                                 num_heads: int,\n                                 
in_proj_weight: Tensor,\n                                 in_proj_bias: Tensor,\n                                 bias_k: Optional[Tensor],\n                                 bias_v: Optional[Tensor],\n                                 add_zero_attn: bool,\n                                 dropout_p: float,\n                                 out_proj_weight: Tensor,\n                                 out_proj_bias: Tensor,\n                                 training: bool = True,\n                                 key_padding_mask: Optional[Tensor] = None,\n                                 need_weights: bool = True,\n                                 attn_mask: Optional[Tensor] = None,\n                                 use_separate_proj_weight: bool = False,\n                                 q_proj_weight: Optional[Tensor] = None,\n                                 k_proj_weight: Optional[Tensor] = None,\n                                 v_proj_weight: Optional[Tensor] = None,\n                                 static_k: Optional[Tensor] = None,\n                                 static_v: Optional[Tensor] = None,\n                                 out_dim: Optional[Tensor] = None\n                                 ) -> Tuple[Tensor, Optional[Tensor]]:\n    r\"\"\"\n    Args:\n        query, key, value: map a query and a set of key-value pairs to an output.\n            See \"Attention Is All You Need\" for more details.\n        embed_dim_to_check: total dimension of the model.\n        num_heads: parallel attention heads.\n        in_proj_weight, in_proj_bias: input projection weight and bias.\n        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.\n        add_zero_attn: add a new batch of zeros to the key and\n                       value sequences at dim=1.\n        dropout_p: probability of an element to be zeroed.\n        out_proj_weight, out_proj_bias: the output projection weight and bias.\n        training: apply dropout if is ``True``.\n        key_padding_mask: if provided, specified padding elements in the key will\n            be ignored by the attention. This is an binary mask. When the value is True,\n            the corresponding value on the attention layer will be filled with -inf.\n        need_weights: output attn_output_weights.\n        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcasted for all\n            the batches while a 3D mask allows to specify a different mask for the entries of each batch.\n        use_separate_proj_weight: the function accept the proj. weights for query, key,\n            and value in different forms. 
If false, in_proj_weight will be used, which is\n            a combination of q_proj_weight, k_proj_weight, v_proj_weight.\n        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.\n        static_k, static_v: static key and value used for attention operators.\n    Shape:\n        Inputs:\n        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is\n          the embedding dimension.\n        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is\n          the embedding dimension.\n        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is\n          the embedding dimension.\n        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.\n          If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions\n          will be unchanged. If a BoolTensor is provided, the positions with the\n          value of ``True`` will be ignored while the position with the value of ``False`` will be unchanged.\n        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.\n          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,\n          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked\n          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend\n          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``\n          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor\n          is provided, it will be added to the attention weight.\n        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,\n          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.\n        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,\n          N is the batch size, E is the embedding dimension. 
E/num_heads is the head dimension.\n        Outputs:\n        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,\n          E is the embedding dimension.\n        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,\n          L is the target sequence length, S is the source sequence length.\n    \"\"\"\n    if not torch.jit.is_scripting():\n        tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v,\n                    out_proj_weight, out_proj_bias)\n        if any([type(t) is not Tensor for t in tens_ops]) and has_torch_function(tens_ops):\n            return handle_torch_function(\n                multi_head_attention_forward, tens_ops, query, key, value,\n                embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias,\n                bias_k, bias_v, add_zero_attn, dropout_p, out_proj_weight,\n                out_proj_bias, training=training, key_padding_mask=key_padding_mask,\n                need_weights=need_weights, attn_mask=attn_mask,\n                use_separate_proj_weight=use_separate_proj_weight,\n                q_proj_weight=q_proj_weight, k_proj_weight=k_proj_weight,\n                v_proj_weight=v_proj_weight, static_k=static_k, static_v=static_v)\n    tgt_len, bsz, embed_dim = query.size()\n    assert embed_dim == embed_dim_to_check\n    # allow MHA to have different sizes for the feature dimension\n    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)\n\n    head_dim = embed_dim // num_heads\n    v_head_dim = out_dim // num_heads\n    assert head_dim * num_heads == embed_dim, \"embed_dim must be divisible by num_heads\"\n    scaling = float(head_dim) ** -0.5\n\n    q = query * scaling\n    k = key\n    v = value\n\n    if attn_mask is not None:\n        assert attn_mask.dtype == torch.float32 or attn_mask.dtype == torch.float64 or \\\n            attn_mask.dtype == torch.float16 or attn_mask.dtype == torch.uint8 or attn_mask.dtype == torch.bool, \\\n            'Only float, byte, and bool types are supported for attn_mask, not {}'.format(attn_mask.dtype)\n        if attn_mask.dtype == torch.uint8:\n            warnings.warn(\"Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.\")\n            attn_mask = attn_mask.to(torch.bool)\n\n        if attn_mask.dim() == 2:\n            attn_mask = attn_mask.unsqueeze(0)\n            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:\n                raise RuntimeError('The size of the 2D attn_mask is not correct.')\n        elif attn_mask.dim() == 3:\n            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:\n                raise RuntimeError('The size of the 3D attn_mask is not correct.')\n        else:\n            raise RuntimeError(\"attn_mask's dimension {} is not supported\".format(attn_mask.dim()))\n        # attn_mask's dim is 3 now.\n\n    # convert ByteTensor key_padding_mask to bool\n    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:\n        warnings.warn(\"Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. 
Use bool tensor instead.\")\n        key_padding_mask = key_padding_mask.to(torch.bool)\n\n    if bias_k is not None and bias_v is not None:\n        if static_k is None and static_v is None:\n            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])\n            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])\n            if attn_mask is not None:\n                attn_mask = pad(attn_mask, (0, 1))\n            if key_padding_mask is not None:\n                key_padding_mask = pad(key_padding_mask, (0, 1))\n        else:\n            assert static_k is None, \"bias cannot be added to static key.\"\n            assert static_v is None, \"bias cannot be added to static value.\"\n    else:\n        assert bias_k is None\n        assert bias_v is None\n\n    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)\n    if k is not None:\n        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)\n    if v is not None:\n        v = v.contiguous().view(-1, bsz * num_heads, v_head_dim).transpose(0, 1)\n\n    if static_k is not None:\n        assert static_k.size(0) == bsz * num_heads\n        assert static_k.size(2) == head_dim\n        k = static_k\n\n    if static_v is not None:\n        assert static_v.size(0) == bsz * num_heads\n        assert static_v.size(2) == v_head_dim\n        v = static_v\n\n    src_len = k.size(1)\n\n    if key_padding_mask is not None:\n        assert key_padding_mask.size(0) == bsz\n        assert key_padding_mask.size(1) == src_len\n\n    if add_zero_attn:\n        src_len += 1\n        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)\n        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)\n        if attn_mask is not None:\n            attn_mask = pad(attn_mask, (0, 1))\n        if key_padding_mask is not None:\n            key_padding_mask = pad(key_padding_mask, (0, 1))\n\n    attn_output_weights = torch.bmm(q, k.transpose(1, 2))\n    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]\n\n    if attn_mask is not None:\n        if attn_mask.dtype == torch.bool:\n            attn_output_weights.masked_fill_(attn_mask, float('-inf'))\n        else:\n            attn_output_weights += attn_mask\n\n\n    if key_padding_mask is not None:\n        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)\n        attn_output_weights = attn_output_weights.masked_fill(\n            key_padding_mask.unsqueeze(1).unsqueeze(2),\n            float('-inf'),\n        )\n        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)\n\n    # attn_output_weights = softmax(\n    #     attn_output_weights, dim=-1)\n    attn_output_weights = softmax(\n            attn_output_weights - attn_output_weights.max(dim=-1, keepdim=True)[0], dim=-1)\n    attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)\n\n    attn_output = torch.bmm(attn_output_weights, v)\n    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, v_head_dim]\n    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, out_dim)\n    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)\n\n    if need_weights:\n        # average attention weights over heads\n        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)\n        return attn_output, attn_output_weights.sum(dim=1) / num_heads\n    else:\n       
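 # need_weights is False: return only attn_output and skip the head-averaged attention map\n       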
 return attn_output, None\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/backbone.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Copied from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n\n\"\"\"\nBackbone modules.\n\"\"\"\nfrom collections import OrderedDict\nimport os\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\n\nfrom .util.misc import NestedTensor, clean_state_dict, is_main_process\n\nfrom .position_encoding import build_position_encoding\nfrom .convnext import build_convnext\nfrom .swin_transformer import build_swin_transformer\nfrom .focal import build_focalnet\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n    \"\"\"\n    BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n    Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n    without which any other models than torchvision.models.resnet[18,34,50,101]\n    produce nans.\n    \"\"\"\n\n    def __init__(self, n):\n        super(FrozenBatchNorm2d, self).__init__()\n        self.register_buffer(\"weight\", torch.ones(n))\n        self.register_buffer(\"bias\", torch.zeros(n))\n        self.register_buffer(\"running_mean\", torch.zeros(n))\n        self.register_buffer(\"running_var\", torch.ones(n))\n\n    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n                              missing_keys, unexpected_keys, error_msgs):\n        num_batches_tracked_key = prefix + 'num_batches_tracked'\n        if num_batches_tracked_key in state_dict:\n            del state_dict[num_batches_tracked_key]\n\n        super(FrozenBatchNorm2d, self)._load_from_state_dict(\n            state_dict, prefix, local_metadata, strict,\n            missing_keys, unexpected_keys, error_msgs)\n\n    def forward(self, x):\n        # move reshapes to the beginning\n        # to make it fuser-friendly\n        w = self.weight.reshape(1, -1, 1, 1)\n        b = self.bias.reshape(1, -1, 1, 1)\n        rv = self.running_var.reshape(1, -1, 1, 1)\n        rm = self.running_mean.reshape(1, -1, 1, 1)\n        eps = 1e-5\n        scale = w * (rv + eps).rsqrt()\n        bias = b - rm * scale\n        return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n\n    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_indices: list):\n        super().__init__()\n        for name, parameter in backbone.named_parameters():\n            if not train_backbone or 'layer2' not in name and 'layer3' not in name and 'layer4' not in name:\n                parameter.requires_grad_(False)\n\n        return_layers = {}\n        for idx, layer_index in enumerate(return_interm_indices):\n            return_layers.update({\"layer{}\".format(5 - len(return_interm_indices) + idx): \"{}\".format(layer_index)})\n\n        # if len:\n        #     if use_stage1_feature:\n        #         
return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", \"layer3\": \"2\", \"layer4\": \"3\"}\n        #     else:\n        #         return_layers = {\"layer2\": \"0\", \"layer3\": \"1\", \"layer4\": \"2\"}\n        # else:\n        #     return_layers = {'layer4': \"0\"}\n        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n        self.num_channels = num_channels\n\n    def forward(self, tensor_list: NestedTensor):\n        xs = self.body(tensor_list.tensors)\n        out: Dict[str, NestedTensor] = {}\n        for name, x in xs.items():\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n            out[name] = NestedTensor(x, mask)\n        # import ipdb; ipdb.set_trace()\n        return out\n\n\nclass Backbone(BackboneBase):\n    \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n    def __init__(self, name: str,\n                 train_backbone: bool,\n                 dilation: bool,\n                 return_interm_indices:list,\n                 batch_norm=FrozenBatchNorm2d,\n                 ):\n        if name in ['resnet18', 'resnet34', 'resnet50', 'resnet101']:\n            backbone = getattr(torchvision.models, name)(\n                replace_stride_with_dilation=[False, False, dilation],\n                pretrained=is_main_process(), norm_layer=batch_norm)\n        else:\n            raise NotImplementedError(\"Why you can get here with name {}\".format(name))\n        # num_channels = 512 if name in ('resnet18', 'resnet34') else 2048\n        assert name not in ('resnet18', 'resnet34'), \"Only resnet50 and resnet101 are available.\"\n        assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n        num_channels_all = [256, 512, 1024, 2048]\n        num_channels = num_channels_all[4-len(return_interm_indices):]\n        super().__init__(backbone, train_backbone, num_channels, return_interm_indices)\n\n\nclass Joiner(nn.Sequential):\n    def __init__(self, backbone, position_embedding):\n        super().__init__(backbone, position_embedding)\n\n    def forward(self, tensor_list: NestedTensor):\n        xs = self[0](tensor_list)\n        out: List[NestedTensor] = []\n        pos = []\n        for name, x in xs.items():\n            out.append(x)\n            # position encoding\n            pos.append(self[1](x).to(x.tensors.dtype))\n\n        return out, pos\n\n\ndef build_backbone(args):\n    \"\"\"\n    Useful args:\n        - backbone: backbone name\n        - lr_backbone: \n        - dilation\n        - return_interm_indices: available: [0,1,2,3], [1,2,3], [3]\n        - backbone_freeze_keywords: \n        - use_checkpoint: for swin only for now\n\n    \"\"\"\n    position_embedding = build_position_encoding(args)\n    train_backbone = False\n    # if not train_backbone:\n    #     raise ValueError(\"Please set lr_backbone > 0\")\n    return_interm_indices = args.return_interm_indices\n    assert return_interm_indices in [[0,1,2,3], [1,2,3], [3]]\n    backbone_freeze_keywords = args.backbone_freeze_keywords\n    use_checkpoint = getattr(args, 'use_checkpoint', False)\n\n    if args.backbone in ['resnet50', 'resnet101']:\n        backbone = Backbone(args.backbone, train_backbone, args.dilation,   \n                                return_interm_indices,   \n                                batch_norm=FrozenBatchNorm2d)\n        bb_num_channels = backbone.num_channels\n    elif args.backbone in ['swin_T_224_1k', 'swin_B_224_22k', 
'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']:\n        pretrain_img_size = int(args.backbone.split('_')[-2])\n        backbone = build_swin_transformer(args.backbone, \\n                    pretrain_img_size=pretrain_img_size, \\n                    out_indices=tuple(return_interm_indices), \\n                dilation=args.dilation, use_checkpoint=use_checkpoint)\n\n        # freeze some layers\n        if backbone_freeze_keywords is not None:\n            for name, parameter in backbone.named_parameters():\n                for keyword in backbone_freeze_keywords:\n                    if keyword in name:\n                        parameter.requires_grad_(False)\n                        break\n\n        pretrained_dir = args.backbone_dir\n        PTDICT = {\n            'swin_T_224_1k': 'swin_tiny_patch4_window7_224.pth',\n            'swin_B_384_22k': 'swin_base_patch4_window12_384.pth',\n            'swin_L_384_22k': 'swin_large_patch4_window12_384_22k.pth',\n        }\n        # pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n        # checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n        from collections import OrderedDict\n        def key_select_function(keyname):\n            if 'head' in keyname:\n                return False\n            if args.dilation and 'layers.3' in keyname:\n                return False\n            return True\n        # NOTE: kept disabled together with the commented-out checkpoint loading above, otherwise 'checkpoint' is undefined here.\n        # _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n        # _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n        # print(str(_tmp_st_output))\n        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]\n    elif args.backbone in [\n        'focalnet_L_384_22k', \n        'focalnet_L_384_22k_fl4', \n        'focalnet_XL_384_22k',\n        'focalnet_XL_384_22k_fl4', \n        'focalnet_H_224_22k',\n        'focalnet_H_224_22k_fl4',         \n        ]:\n        # added by Jianwei\n        backbone = build_focalnet(args.backbone, \\n                    focal_levels=args.focal_levels, \\n                    focal_windows=args.focal_windows, \\n                    out_indices=tuple(return_interm_indices), \\n                    use_checkpoint=use_checkpoint)\n\n        # freeze some layers\n        if backbone_freeze_keywords is not None:\n            for name, parameter in backbone.named_parameters():\n                for keyword in backbone_freeze_keywords:\n                    if keyword in name:\n                        parameter.requires_grad_(False)\n                        break\n\n        pretrained_dir = '/'\n        PTDICT = {\n            'focalnet_L_384_22k': 'focalnet_large_lrf_384.pth',\n            'focalnet_L_384_22k_fl4': 'focalnet_large_lrf_384_fl4.pth',            \n            'focalnet_XL_384_22k': 'focalnet_xlarge_lrf_384.pth',\n            'focalnet_XL_384_22k_fl4': 'focalnet_xlarge_lrf_384_fl4.pth',\n            'focalnet_H_224_22k': 'focalnet_huge_lrf_224.pth', \n            'focalnet_H_224_22k_fl4': 'focalnet_huge_lrf_224_fl4.pth', \n        }\n        # pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n        # checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n        from collections import OrderedDict\n        def key_select_function(keyname):\n            if 'head' in keyname:\n                return False\n            if args.dilation and 'layers.3' in keyname:\n                return False\n            return True        \n  
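      # A hedged sketch (added comment, not part of the original code): if ImageNet-pretrained FocalNet weights should be loaded here, the pattern would mirror the Swin branch above, e.g.\n        #   pretrainedpath = os.path.join(pretrained_dir, PTDICT[args.backbone])\n        #   checkpoint = torch.load(pretrainedpath, map_location='cpu')['model']\n        #   _tmp_st = OrderedDict({k: v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n        #   backbone.load_state_dict(_tmp_st, strict=False)\n        # In this project the full detector checkpoint is presumably loaded elsewhere, so the local load is left disabled:\n  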
      # _tmp_st = OrderedDict({k:v for k, v in clean_state_dict(checkpoint).items() if key_select_function(k)})\n        # _tmp_st_output = backbone.load_state_dict(_tmp_st, strict=False)\n        # print(str(_tmp_st_output))\n        bb_num_channels = backbone.num_features[4 - len(return_interm_indices):]   \n    elif args.backbone in ['convnext_xlarge_22k']:\n        backbone = build_convnext(modelname=args.backbone, pretrained=True, out_indices=tuple(return_interm_indices),backbone_dir=args.backbone_dir)\n        bb_num_channels = backbone.dims[4 - len(return_interm_indices):]\n    else:\n        raise NotImplementedError(\"Unknown backbone {}\".format(args.backbone))\n    \n\n    assert len(bb_num_channels) == len(return_interm_indices), f\"len(bb_num_channels) {len(bb_num_channels)} != len(return_interm_indices) {len(return_interm_indices)}\"\n\n\n    model = Joiner(backbone, position_embedding)\n    model.num_channels = bb_num_channels \n    assert isinstance(bb_num_channels, List), \"bb_num_channels is expected to be a List but {}\".format(type(bb_num_channels))\n    return model\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/convnext.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\n\nfrom functools import partial\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom timm.models.layers import trunc_normal_, DropPath\n\nfrom .util.misc import NestedTensor\n# from timm.models.registry import register_model\n\nclass Block(nn.Module):\n    r\"\"\" ConvNeXt Block. There are two equivalent implementations:\n    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W)\n    (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back\n    We use (2) as we find it slightly faster in PyTorch\n    \n    Args:\n        dim (int): Number of input channels.\n        drop_path (float): Stochastic depth rate. Default: 0.0\n        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.\n    \"\"\"\n    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):\n        super().__init__()\n        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv\n        self.norm = LayerNorm(dim, eps=1e-6)\n        self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers\n        self.act = nn.GELU()\n        self.pwconv2 = nn.Linear(4 * dim, dim)\n        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), \n                                    requires_grad=True) if layer_scale_init_value > 0 else None\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n\n    def forward(self, x):\n        input = x\n        x = self.dwconv(x)\n        x = x.permute(0, 2, 3, 1) # (N, C, H, W) -> (N, H, W, C)\n        x = self.norm(x)\n        x = self.pwconv1(x)\n        x = self.act(x)\n        x = self.pwconv2(x)\n        if self.gamma is not None:\n            x = self.gamma * x\n        x = x.permute(0, 3, 1, 2) # (N, H, W, C) -> (N, C, H, W)\n\n        x = input + self.drop_path(x)\n        return x\n\nclass ConvNeXt(nn.Module):\n    r\"\"\" ConvNeXt\n        A PyTorch impl of : `A ConvNet for the 2020s`  -\n          https://arxiv.org/pdf/2201.03545.pdf\n\n    Args:\n        in_chans (int): Number of input image channels. Default: 3\n        num_classes (int): Number of classes for classification head. Default: 1000\n        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]\n        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]\n        drop_path_rate (float): Stochastic depth rate. Default: 0.\n        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.\n        head_init_scale (float): Init scaling value for classifier weights and biases. 
Default: 1.\n    \"\"\"\n    def __init__(self, in_chans=3, num_classes=1000, \n                 depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0., \n                 layer_scale_init_value=1e-6, head_init_scale=1.,\n                 out_indices=[0, 1, 2, 3]\n                 ):\n        super().__init__()\n        self.dims = dims\n\n        self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers\n        stem = nn.Sequential(\n            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),\n            LayerNorm(dims[0], eps=1e-6, data_format=\"channels_first\")\n        )\n        self.downsample_layers.append(stem)\n        for i in range(3):\n            downsample_layer = nn.Sequential(\n                    LayerNorm(dims[i], eps=1e-6, data_format=\"channels_first\"),\n                    nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),\n            )\n            self.downsample_layers.append(downsample_layer)\n\n        self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple residual blocks\n        dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] \n        cur = 0\n        for i in range(4):\n            stage = nn.Sequential(\n                *[Block(dim=dims[i], drop_path=dp_rates[cur + j], \n                layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]\n            )\n            self.stages.append(stage)\n            cur += depths[i]\n\n        self.out_indices = out_indices\n\n        norm_layer = partial(LayerNorm, eps=1e-6, data_format=\"channels_first\")\n        for i_layer in range(4):\n            layer = norm_layer(dims[i_layer])\n            layer_name = f'norm{i_layer}'\n            self.add_module(layer_name, layer)\n\n        # self.norm = nn.LayerNorm(dims[-1], eps=1e-6) # final norm layer\n        # self.head = nn.Linear(dims[-1], num_classes)\n\n        # self.apply(self._init_weights)\n        # self.head.weight.data.mul_(head_init_scale)\n        # self.head.bias.data.mul_(head_init_scale)\n\n    def _init_weights(self, m):\n        if isinstance(m, (nn.Conv2d, nn.Linear)):\n            trunc_normal_(m.weight, std=.02)\n            nn.init.constant_(m.bias, 0)\n\n    def forward_features(self, x):\n        outs = []\n        for i in range(4):\n            x = self.downsample_layers[i](x)\n            x = self.stages[i](x)\n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                x_out = norm_layer(x)\n                outs.append(x_out)\n        # return self.norm(x.mean([-2, -1])) # global average pooling, (N, C, H, W) -> (N, C)\n        return tuple(outs)\n\n    # def forward(self, x):\n    #     x = self.forward_features(x)\n    #     return x\n\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        outs = self.forward_features(x)\n\n        # collect for nesttensors        \n        outs_dict = {}\n        for idx, out_i in enumerate(outs):\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]\n            outs_dict[idx] = NestedTensor(out_i, mask)\n\n        return outs_dict\n\nclass LayerNorm(nn.Module):\n    r\"\"\" LayerNorm that supports two data formats: channels_last (default) or channels_first. \n    The ordering of the dimensions in the inputs. 
channels_last corresponds to inputs with \n    shape (batch_size, height, width, channels) while channels_first corresponds to inputs \n    with shape (batch_size, channels, height, width).\n    \"\"\"\n    def __init__(self, normalized_shape, eps=1e-6, data_format=\"channels_last\"):\n        super().__init__()\n        self.weight = nn.Parameter(torch.ones(normalized_shape))\n        self.bias = nn.Parameter(torch.zeros(normalized_shape))\n        self.eps = eps\n        self.data_format = data_format\n        if self.data_format not in [\"channels_last\", \"channels_first\"]:\n            raise NotImplementedError \n        self.normalized_shape = (normalized_shape, )\n    \n    def forward(self, x):\n        if self.data_format == \"channels_last\":\n            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n        elif self.data_format == \"channels_first\":\n            u = x.mean(1, keepdim=True)\n            s = (x - u).pow(2).mean(1, keepdim=True)\n            x = (x - u) / torch.sqrt(s + self.eps)\n            x = self.weight[:, None, None] * x + self.bias[:, None, None]\n            return x\n\n\nmodel_urls = {\n    \"convnext_tiny_1k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_tiny_1k_224_ema.pth\",\n    \"convnext_small_1k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_small_1k_224_ema.pth\",\n    \"convnext_base_1k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_base_1k_224_ema.pth\",\n    \"convnext_large_1k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_large_1k_224_ema.pth\",\n    \"convnext_base_22k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_base_22k_224.pth\",\n    \"convnext_large_22k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_large_22k_224.pth\",\n    \"convnext_xlarge_22k\": \"https://dl.fbaipublicfiles.com/convnext/convnext_xlarge_22k_224.pth\",\n}\n\n# @register_model\n# def convnext_tiny(pretrained=False, **kwargs):\n#     model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)\n#     if pretrained:\n#         url = model_urls['convnext_tiny_1k']\n#         checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", check_hash=True)\n#         model.load_state_dict(checkpoint[\"model\"])\n#     return model\n\n# @register_model\n# def convnext_small(pretrained=False, **kwargs):\n#     model = ConvNeXt(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)\n#     if pretrained:\n#         url = model_urls['convnext_small_1k']\n#         checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", check_hash=True)\n#         model.load_state_dict(checkpoint[\"model\"])\n#     return model\n\n# @register_model\n# def convnext_base(pretrained=False, in_22k=False, **kwargs):\n#     model = ConvNeXt(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)\n#     if pretrained:\n#         url = model_urls['convnext_base_22k'] if in_22k else model_urls['convnext_base_1k']\n#         checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", check_hash=True)\n#         model.load_state_dict(checkpoint[\"model\"])\n#     return model\n\n# @register_model\n# def convnext_large(pretrained=False, in_22k=False, **kwargs):\n#     model = ConvNeXt(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)\n#     if pretrained:\n#         url = model_urls['convnext_large_22k'] if in_22k else model_urls['convnext_large_1k']\n#         checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", 
check_hash=True)\n#         model.load_state_dict(checkpoint[\"model\"])\n#     return model\n\n# @register_model\n# def convnext_xlarge(pretrained=False, in_22k=False, **kwargs):\n#     model = ConvNeXt(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)\n#     if pretrained:\n#         url = model_urls['convnext_xlarge_22k'] if in_22k else model_urls['convnext_xlarge_1k']\n#         checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", check_hash=True)\n#         model.load_state_dict(checkpoint[\"model\"])\n#     return model\n\ndef build_convnext(modelname, pretrained,backbone_dir=None, **kw):\n    assert modelname in ['convnext_xlarge_22k']\n\n    model_para_dict = {\n        'convnext_xlarge_22k': dict(\n            depths=[3, 3, 27, 3],\n            dims=[256, 512, 1024, 2048],\n        ),\n    }\n    kw_cgf = model_para_dict[modelname]\n    kw_cgf.update(kw)\n    model = ConvNeXt(**kw_cgf)\n    if pretrained:\n        url = model_urls[modelname]\n        checkpoint = torch.hub.load_state_dict_from_url(url=url, model_dir=backbone_dir, map_location=\"cpu\", check_hash=True)\n        _tmp_st_output = model.load_state_dict(checkpoint[\"model\"], strict=False)\n        print(str(_tmp_st_output))\n\n    return model"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/deformable_transformer.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR Transformer class.\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n\nimport math, random\nimport copy\nfrom typing import Optional\n\nfrom .util.misc import inverse_sigmoid\n\nimport torch\nfrom torch import nn, Tensor\n\nfrom .utils import gen_encoder_output_proposals, MLP,_get_activation_fn, gen_sineembed_for_position\nfrom projects.instance_segment_anything.ops.modules import MSDeformAttn\n\nclass DeformableTransformer(nn.Module):\n\n    def __init__(self, d_model=256, nhead=8, \n                 num_queries=300, \n                 num_encoder_layers=6,\n                 num_unicoder_layers=0,\n                 num_decoder_layers=6, \n                 dim_feedforward=2048, dropout=0.0,\n                 activation=\"relu\", normalize_before=False,\n                 return_intermediate_dec=False, query_dim=4,\n                 num_patterns=0,\n                 modulate_hw_attn=False,\n                 # for deformable encoder\n                 deformable_encoder=False,\n                 deformable_decoder=False,\n                 num_feature_levels=1,\n                 enc_n_points=4,\n                 dec_n_points=4,\n                 use_deformable_box_attn=False,\n                 box_attn_type='roi_align',\n                 # init query\n                 learnable_tgt_init=False,\n                 decoder_query_perturber=None,\n                 add_channel_attention=False,\n                 add_pos_value=False,\n                 random_refpoints_xy=False,\n                 # two stage\n                 two_stage_type='no', # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']\n                 two_stage_pat_embed=0,\n                 two_stage_add_query_num=0,\n                 two_stage_learn_wh=False,\n                 two_stage_keep_all_tokens=False,\n                 # evo of #anchors\n                 dec_layer_number=None,\n                 rm_enc_query_scale=True,\n                 rm_dec_query_scale=True,\n                 rm_self_attn_layers=None,\n                 key_aware_type=None,\n                 # layer share\n                 layer_share_type=None,\n                 # for detach\n                 rm_detach=None,\n                 decoder_sa_type='ca', \n                 module_seq=['sa', 'ca', 'ffn'],\n                 # for dn\n                 embed_init_tgt=False,\n\n                 use_detached_boxes_dec_out=False,\n                 ):\n        super().__init__()\n        self.num_feature_levels = num_feature_levels\n        self.num_encoder_layers = num_encoder_layers\n        self.num_unicoder_layers = num_unicoder_layers\n        self.num_decoder_layers = num_decoder_layers\n        self.deformable_encoder = deformable_encoder\n        self.deformable_decoder = deformable_decoder\n        self.two_stage_keep_all_tokens = two_stage_keep_all_tokens\n        
self.num_queries = num_queries\n        self.random_refpoints_xy = random_refpoints_xy\n        self.use_detached_boxes_dec_out = use_detached_boxes_dec_out\n        assert query_dim == 4\n\n        if num_feature_levels > 1:\n            assert deformable_encoder, \"only support deformable_encoder for num_feature_levels > 1\"\n        if use_deformable_box_attn:\n            assert deformable_encoder or deformable_encoder\n\n        assert layer_share_type in [None, 'encoder', 'decoder', 'both']\n        if layer_share_type in ['encoder', 'both']:\n            enc_layer_share = True\n        else:\n            enc_layer_share = False\n        if layer_share_type in ['decoder', 'both']:\n            dec_layer_share = True\n        else:\n            dec_layer_share = False\n        assert layer_share_type is None\n\n        self.decoder_sa_type = decoder_sa_type\n        assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']\n\n        # choose encoder layer type\n        if deformable_encoder:\n            encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,\n                                                          dropout, activation,\n                                                          num_feature_levels, nhead, enc_n_points, add_channel_attention=add_channel_attention, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type)\n        else:\n            raise NotImplementedError\n        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None\n        self.encoder = TransformerEncoder(\n            encoder_layer, num_encoder_layers, \n            encoder_norm, d_model=d_model, \n            num_queries=num_queries,\n            deformable_encoder=deformable_encoder, \n            enc_layer_share=enc_layer_share, \n            two_stage_type=two_stage_type\n        )\n\n        # choose decoder layer type\n        if deformable_decoder:\n            decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,\n                                                          dropout, activation,\n                                                          num_feature_levels, nhead, dec_n_points, use_deformable_box_attn=use_deformable_box_attn, box_attn_type=box_attn_type,\n                                                          key_aware_type=key_aware_type,\n                                                          decoder_sa_type=decoder_sa_type,\n                                                          module_seq=module_seq)\n\n        else:\n            raise NotImplementedError\n\n        decoder_norm = nn.LayerNorm(d_model)\n        self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,\n                                        return_intermediate=return_intermediate_dec,\n                                        d_model=d_model, query_dim=query_dim, \n                                        modulate_hw_attn=modulate_hw_attn,\n                                        num_feature_levels=num_feature_levels,\n                                        deformable_decoder=deformable_decoder,\n                                        decoder_query_perturber=decoder_query_perturber, \n                                        dec_layer_number=dec_layer_number, rm_dec_query_scale=rm_dec_query_scale,\n                                        dec_layer_share=dec_layer_share,\n                                        use_detached_boxes_dec_out=use_detached_boxes_dec_out\n                                 
       )\n\n        self.d_model = d_model\n        self.nhead = nhead\n        self.dec_layers = num_decoder_layers\n        self.num_queries = num_queries # useful for single stage model only\n        self.num_patterns = num_patterns\n        if not isinstance(num_patterns, int):\n            Warning(\"num_patterns should be int but {}\".format(type(num_patterns)))\n            self.num_patterns = 0\n\n        if num_feature_levels > 1:\n            if self.num_encoder_layers > 0:\n                self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))\n            else:\n                self.level_embed = None\n        \n        self.learnable_tgt_init = learnable_tgt_init\n        assert learnable_tgt_init, \"why not learnable_tgt_init\"\n        self.embed_init_tgt = embed_init_tgt\n        if (two_stage_type != 'no' and embed_init_tgt) or (two_stage_type == 'no'):\n            self.tgt_embed = nn.Embedding(self.num_queries, d_model)\n            nn.init.normal_(self.tgt_embed.weight.data)\n        else:\n            self.tgt_embed = None\n            \n        # for two stage\n        self.two_stage_type = two_stage_type\n        self.two_stage_pat_embed = two_stage_pat_embed\n        self.two_stage_add_query_num = two_stage_add_query_num\n        self.two_stage_learn_wh = two_stage_learn_wh\n        assert two_stage_type in ['no', 'standard'], \"unknown param {} of two_stage_type\".format(two_stage_type)\n        if two_stage_type =='standard':\n            # anchor selection at the output of encoder\n            self.enc_output = nn.Linear(d_model, d_model)\n            self.enc_output_norm = nn.LayerNorm(d_model)      \n            \n            if two_stage_pat_embed > 0:\n                self.pat_embed_for_2stage = nn.Parameter(torch.Tensor(two_stage_pat_embed, d_model))\n                nn.init.normal_(self.pat_embed_for_2stage)\n\n            if two_stage_add_query_num > 0:\n                self.tgt_embed = nn.Embedding(self.two_stage_add_query_num, d_model)\n\n            if two_stage_learn_wh:\n                # import ipdb; ipdb.set_trace()\n                self.two_stage_wh_embedding = nn.Embedding(1, 2)\n            else:\n                self.two_stage_wh_embedding = None\n\n        if two_stage_type == 'no':\n            self.init_ref_points(num_queries) # init self.refpoint_embed\n\n\n        self.enc_out_class_embed = None\n        self.enc_out_bbox_embed = None\n\n        # evolution of anchors\n        self.dec_layer_number = dec_layer_number\n        if dec_layer_number is not None:\n            if self.two_stage_type != 'no' or num_patterns == 0:\n                assert dec_layer_number[0] == num_queries, f\"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries})\"\n            else:\n                assert dec_layer_number[0] == num_queries * num_patterns, f\"dec_layer_number[0]({dec_layer_number[0]}) != num_queries({num_queries}) * num_patterns({num_patterns})\"\n\n        self._reset_parameters()\n\n        self.rm_self_attn_layers = rm_self_attn_layers\n        if rm_self_attn_layers is not None:\n            # assert len(rm_self_attn_layers) == num_decoder_layers\n            print(\"Removing the self-attn in {} decoder layers\".format(rm_self_attn_layers))\n            for lid, dec_layer in enumerate(self.decoder.layers):\n                if lid in rm_self_attn_layers:\n                    dec_layer.rm_self_attn_modules()\n\n        self.rm_detach = rm_detach\n        if self.rm_detach:\n            assert 
isinstance(rm_detach, list)\n            assert any([i in ['enc_ref', 'enc_tgt', 'dec'] for i in rm_detach])\n        self.decoder.rm_detach = rm_detach\n\n    def _reset_parameters(self):\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MSDeformAttn):\n                m._reset_parameters()\n        if self.num_feature_levels > 1 and self.level_embed is not None:\n            nn.init.normal_(self.level_embed)\n\n        if self.two_stage_learn_wh:\n            nn.init.constant_(self.two_stage_wh_embedding.weight, math.log(0.05 / (1 - 0.05)))\n\n\n    def get_valid_ratio(self, mask):\n        _, H, W = mask.shape\n        valid_H = torch.sum(~mask[:, :, 0], 1)\n        valid_W = torch.sum(~mask[:, 0, :], 1)\n        valid_ratio_h = valid_H.float() / H\n        valid_ratio_w = valid_W.float() / W\n        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n        return valid_ratio\n\n    def init_ref_points(self, use_num_queries):\n        self.refpoint_embed = nn.Embedding(use_num_queries, 4)\n        \n        if self.random_refpoints_xy:\n            # import ipdb; ipdb.set_trace()\n            self.refpoint_embed.weight.data[:, :2].uniform_(0,1)\n            self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])\n            self.refpoint_embed.weight.data[:, :2].requires_grad = False\n\n    \n\n    def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_mask=None):\n        \"\"\"\n        Input:\n            - srcs: List of multi features [bs, ci, hi, wi]\n            - masks: List of multi masks [bs, hi, wi]\n            - refpoint_embed: [bs, num_dn, 4]. None in infer\n            - pos_embeds: List of multi pos embeds [bs, ci, hi, wi]\n            - tgt: [bs, num_dn, d_model]. 
None in infer\n            \n        \"\"\"\n        # if self.two_stage_type != 'no' and self.two_stage_add_query_num == 0:\n        #     assert refpoint_embed is None\n\n        # prepare input for encoder\n        src_flatten = []\n        mask_flatten = []\n        lvl_pos_embed_flatten = []\n        spatial_shapes = []\n        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):\n            bs, c, h, w = src.shape\n            spatial_shape = (h, w)\n            spatial_shapes.append(spatial_shape)\n\n            src = src.flatten(2).transpose(1, 2)                # bs, hw, c\n            mask = mask.flatten(1)                              # bs, hw\n            pos_embed = pos_embed.flatten(2).transpose(1, 2)    # bs, hw, c\n            if self.num_feature_levels > 1 and self.level_embed is not None:\n                lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)\n            else:\n                lvl_pos_embed = pos_embed\n            lvl_pos_embed_flatten.append(lvl_pos_embed)\n            src_flatten.append(src)\n            mask_flatten.append(mask)\n        src_flatten = torch.cat(src_flatten, 1)    # bs, \\sum{hxw}, c \n        mask_flatten = torch.cat(mask_flatten, 1)   # bs, \\sum{hxw}\n        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) # bs, \\sum{hxw}, c \n        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)\n        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)\n\n        # two stage\n        enc_topk_proposals = enc_refpoint_embed = None\n\n        #########################################################\n        # Begin Encoder\n        #########################################################\n        memory, enc_intermediate_output, enc_intermediate_refpoints = self.encoder(\n                src_flatten, \n                pos=lvl_pos_embed_flatten, \n                level_start_index=level_start_index, \n                spatial_shapes=spatial_shapes,\n                valid_ratios=valid_ratios,\n                key_padding_mask=mask_flatten,\n                ref_token_index=enc_topk_proposals, # bs, nq \n                ref_token_coord=enc_refpoint_embed, # bs, nq, 4\n                )\n        #########################################################\n        # End Encoder\n        # - memory: bs, \\sum{hw}, c\n        # - mask_flatten: bs, \\sum{hw}\n        # - lvl_pos_embed_flatten: bs, \\sum{hw}, c\n        # - enc_intermediate_output: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)\n        # - enc_intermediate_refpoints: None or (nenc+1, bs, nq, c) or (nenc, bs, nq, c)\n        #########################################################\n\n\n        if self.two_stage_type =='standard':\n            if self.two_stage_learn_wh:\n                input_hw = self.two_stage_wh_embedding.weight[0]\n            else:\n                input_hw = None\n            output_memory, output_proposals = gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes, input_hw)\n            output_memory = self.enc_output_norm(self.enc_output(output_memory))\n            \n            if self.two_stage_pat_embed > 0:\n                bs, nhw, _ = output_memory.shape\n                # output_memory: bs, n, 256; self.pat_embed_for_2stage: k, 256\n                output_memory = output_memory.repeat(1, self.two_stage_pat_embed, 1)\n    
            _pats = self.pat_embed_for_2stage.repeat_interleave(nhw, 0) \n                output_memory = output_memory + _pats\n                output_proposals = output_proposals.repeat(1, self.two_stage_pat_embed, 1)\n\n            if self.two_stage_add_query_num > 0:\n                assert refpoint_embed is not None\n                output_memory = torch.cat((output_memory, tgt), dim=1)\n                output_proposals = torch.cat((output_proposals, refpoint_embed), dim=1)\n\n            enc_outputs_class_unselected = self.enc_out_class_embed(output_memory)\n            enc_outputs_coord_unselected = self.enc_out_bbox_embed(output_memory) + output_proposals # (bs, \\sum{hw}, 4) unsigmoid\n            topk = self.num_queries\n            topk_proposals = torch.topk(enc_outputs_class_unselected.max(-1)[0], topk, dim=1)[1] # bs, nq\n            \n\n            # gather boxes\n            refpoint_embed_undetach = torch.gather(enc_outputs_coord_unselected, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid\n            refpoint_embed_ = refpoint_embed_undetach.detach()\n            init_box_proposal = torch.gather(output_proposals, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)).sigmoid() # sigmoid\n\n            # gather tgt\n            tgt_undetach = torch.gather(output_memory, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model))\n            if self.embed_init_tgt:\n                tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model\n            else:\n                tgt_ = tgt_undetach.detach()\n\n            if refpoint_embed is not None:\n                refpoint_embed=torch.cat([refpoint_embed,refpoint_embed_],dim=1)\n                tgt=torch.cat([tgt,tgt_],dim=1)\n            else:\n                refpoint_embed,tgt=refpoint_embed_,tgt_\n\n        elif self.two_stage_type == 'no':\n            tgt_ = self.tgt_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, d_model\n            refpoint_embed_ = self.refpoint_embed.weight[:, None, :].repeat(1, bs, 1).transpose(0, 1) # nq, bs, 4\n\n            if refpoint_embed is not None:\n                refpoint_embed=torch.cat([refpoint_embed,refpoint_embed_],dim=1)\n                tgt=torch.cat([tgt,tgt_],dim=1)\n            else:\n                refpoint_embed,tgt=refpoint_embed_,tgt_\n\n            if self.num_patterns > 0:\n                tgt_embed = tgt.repeat(1, self.num_patterns, 1)\n                refpoint_embed = refpoint_embed.repeat(1, self.num_patterns, 1)\n                tgt_pat = self.patterns.weight[None, :, :].repeat_interleave(self.num_queries, 1) # 1, n_q*n_pat, d_model\n                tgt = tgt_embed + tgt_pat\n\n            init_box_proposal = refpoint_embed_.sigmoid()\n\n        else:\n            raise NotImplementedError(\"unknown two_stage_type {}\".format(self.two_stage_type))\n        #########################################################\n        # End preparing tgt\n        # - tgt: bs, NQ, d_model\n        # - refpoint_embed(unsigmoid): bs, NQ, d_model \n        ######################################################### \n\n\n        #########################################################\n        # Begin Decoder\n        #########################################################\n        hs, references = self.decoder(\n                tgt=tgt.transpose(0, 1), \n                memory=memory.transpose(0, 1), \n                memory_key_padding_mask=mask_flatten, \n                pos=lvl_pos_embed_flatten.transpose(0, 1),\n  
              refpoints_unsigmoid=refpoint_embed.transpose(0, 1), \n                level_start_index=level_start_index, \n                spatial_shapes=spatial_shapes,\n                valid_ratios=valid_ratios,tgt_mask=attn_mask)\n        #########################################################\n        # End Decoder\n        # hs: n_dec, bs, nq, d_model\n        # references: n_dec+1, bs, nq, query_dim\n        #########################################################\n\n\n        #########################################################\n        # Begin postprocess\n        #########################################################     \n        if self.two_stage_type == 'standard':\n            if self.two_stage_keep_all_tokens:\n                hs_enc = output_memory.unsqueeze(0)\n                ref_enc = enc_outputs_coord_unselected.unsqueeze(0)\n                init_box_proposal = output_proposals\n                # import ipdb; ipdb.set_trace()\n            else:\n                hs_enc = tgt_undetach.unsqueeze(0)\n                ref_enc = refpoint_embed_undetach.sigmoid().unsqueeze(0)\n        else:\n            hs_enc = ref_enc = None\n        #########################################################\n        # End postprocess\n        # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or (n_enc, bs, nq, d_model) or None\n        # ref_enc: (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or (n_enc, bs, nq, d_model) or None\n        #########################################################        \n\n        return hs, references, hs_enc, ref_enc, init_box_proposal\n        # hs: (n_dec, bs, nq, d_model)\n        # references: sigmoid coordinates. (n_dec+1, bs, bq, 4)\n        # hs_enc: (n_enc+1, bs, nq, d_model) or (1, bs, nq, d_model) or None\n        # ref_enc: sigmoid coordinates. 
\\\n        #           (n_enc+1, bs, nq, query_dim) or (1, bs, nq, query_dim) or None\n\nclass TransformerEncoder(nn.Module):\n\n    def __init__(self, \n        encoder_layer, num_layers, norm=None, d_model=256, \n        num_queries=300,\n        deformable_encoder=False, \n        enc_layer_share=False, enc_layer_dropout_prob=None,                  \n        two_stage_type='no', # ['no', 'standard', 'early', 'combine', 'enceachlayer', 'enclayer1']\n    ):\n        super().__init__()\n        # prepare layers\n        if num_layers > 0:\n            self.layers = _get_clones(encoder_layer, num_layers, layer_share=enc_layer_share)\n        else:\n            self.layers = []\n            del encoder_layer\n\n        self.query_scale = None\n        self.num_queries = num_queries\n        self.deformable_encoder = deformable_encoder\n        self.num_layers = num_layers\n        self.norm = norm\n        self.d_model = d_model\n\n        self.enc_layer_dropout_prob = enc_layer_dropout_prob\n        if enc_layer_dropout_prob is not None:\n            assert isinstance(enc_layer_dropout_prob, list)\n            assert len(enc_layer_dropout_prob) == num_layers\n            for i in enc_layer_dropout_prob:\n                assert 0.0 <= i <= 1.0\n\n        self.two_stage_type = two_stage_type\n        if two_stage_type in ['enceachlayer', 'enclayer1']:\n            _proj_layer = nn.Linear(d_model, d_model)\n            _norm_layer = nn.LayerNorm(d_model)\n            if two_stage_type == 'enclayer1':\n                self.enc_norm = nn.ModuleList([_norm_layer])\n                self.enc_proj = nn.ModuleList([_proj_layer])\n            else:\n                self.enc_norm = nn.ModuleList([copy.deepcopy(_norm_layer) for i in range(num_layers - 1) ])\n                self.enc_proj = nn.ModuleList([copy.deepcopy(_proj_layer) for i in range(num_layers - 1) ]) \n\n    @staticmethod\n    def get_reference_points(spatial_shapes, valid_ratios, device):\n        reference_points_list = []\n        for lvl, (H_, W_) in enumerate(spatial_shapes):\n\n            ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),\n                                          torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))\n            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)\n            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)\n            ref = torch.stack((ref_x, ref_y), -1)\n            reference_points_list.append(ref)\n        reference_points = torch.cat(reference_points_list, 1)\n        reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n        return reference_points\n\n    def forward(self, \n            src: Tensor, \n            pos: Tensor, \n            spatial_shapes: Tensor, \n            level_start_index: Tensor, \n            valid_ratios: Tensor, \n            key_padding_mask: Tensor,\n            ref_token_index: Optional[Tensor]=None,\n            ref_token_coord: Optional[Tensor]=None \n            ):\n        \"\"\"\n        Input:\n            - src: [bs, sum(hi*wi), 256]\n            - pos: pos embed for src. 
[bs, sum(hi*wi), 256]\n            - spatial_shapes: h,w of each level [num_level, 2]\n            - level_start_index: [num_level] start point of level in sum(hi*wi).\n            - valid_ratios: [bs, num_level, 2]\n            - key_padding_mask: [bs, sum(hi*wi)]\n\n            - ref_token_index: bs, nq\n            - ref_token_coord: bs, nq, 4\n        Intermedia:\n            - reference_points: [bs, sum(hi*wi), num_level, 2]\n        Outpus: \n            - output: [bs, sum(hi*wi), 256]\n        \"\"\"\n        if self.two_stage_type in ['no', 'standard', 'enceachlayer', 'enclayer1']:\n            assert ref_token_index is None\n\n        output = src\n\n        # preparation and reshape\n        if self.num_layers > 0:\n            if self.deformable_encoder:\n                reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)\n                # import ipdb; ipdb.set_trace()\n\n        intermediate_output = []\n        intermediate_ref = []\n        if ref_token_index is not None:\n            out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))\n            intermediate_output.append(out_i)\n            intermediate_ref.append(ref_token_coord)\n\n\n        # intermediate_coord = []\n        # main process\n        for layer_id, layer in enumerate(self.layers):\n            # main process\n            dropflag = False\n            if self.enc_layer_dropout_prob is not None:\n                prob = random.random()\n                if prob < self.enc_layer_dropout_prob[layer_id]:\n                    dropflag = True\n            \n            if not dropflag:\n                if self.deformable_encoder:\n                    output = layer(src=output, pos=pos, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, key_padding_mask=key_padding_mask)  \n                else:\n                    output = layer(src=output.transpose(0, 1), pos=pos.transpose(0, 1), key_padding_mask=key_padding_mask).transpose(0, 1)        \n\n            if ((layer_id == 0 and self.two_stage_type in ['enceachlayer', 'enclayer1']) \\\n                or (self.two_stage_type == 'enceachlayer')) \\\n                    and (layer_id != self.num_layers - 1):\n                output_memory, output_proposals = gen_encoder_output_proposals(output, key_padding_mask, spatial_shapes)\n                output_memory = self.enc_norm[layer_id](self.enc_proj[layer_id](output_memory))\n                \n                # gather boxes\n                topk = self.num_queries\n                enc_outputs_class = self.class_embed[layer_id](output_memory)\n                ref_token_index = torch.topk(enc_outputs_class.max(-1)[0], topk, dim=1)[1] # bs, nq\n                ref_token_coord = torch.gather(output_proposals, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, 4))\n\n                output = output_memory\n\n            # aux loss\n            if (layer_id != self.num_layers - 1) and ref_token_index is not None:\n                out_i = torch.gather(output, 1, ref_token_index.unsqueeze(-1).repeat(1, 1, self.d_model))\n                intermediate_output.append(out_i)\n                intermediate_ref.append(ref_token_coord)\n\n\n        if self.norm is not None:\n            output = self.norm(output)\n\n        if ref_token_index is not None:\n            intermediate_output = torch.stack(intermediate_output) # n_enc/n_enc-1, bs, \\sum{hw}, d_model\n            intermediate_ref = 
torch.stack(intermediate_ref)\n        else:\n            intermediate_output = intermediate_ref = None\n\n        return output, intermediate_output, intermediate_ref\n\nclass TransformerDecoder(nn.Module):\n\n    def __init__(self, decoder_layer, num_layers, norm=None, \n                    return_intermediate=False, \n                    d_model=256, query_dim=4, \n                    modulate_hw_attn=False,\n                    num_feature_levels=1,\n                    deformable_decoder=False,\n                    decoder_query_perturber=None,\n                    dec_layer_number=None, # number of queries each layer in decoder\n                    rm_dec_query_scale=False,\n                    dec_layer_share=False,\n                    dec_layer_dropout_prob=None,\n                    use_detached_boxes_dec_out=False\n                    ):\n        super().__init__()\n        if num_layers > 0:\n            self.layers = _get_clones(decoder_layer, num_layers, layer_share=dec_layer_share)\n        else:\n            self.layers = []\n        self.num_layers = num_layers\n        self.norm = norm\n        self.return_intermediate = return_intermediate\n        assert return_intermediate, \"support return_intermediate only\"\n        self.query_dim = query_dim\n        assert query_dim in [2, 4], \"query_dim should be 2/4 but {}\".format(query_dim)\n        self.num_feature_levels = num_feature_levels\n        self.use_detached_boxes_dec_out = use_detached_boxes_dec_out\n\n        \n        self.ref_point_head = MLP(query_dim // 2 * d_model, d_model, d_model, 2)\n        if not deformable_decoder:\n            self.query_pos_sine_scale = MLP(d_model, d_model, d_model, 2)\n        else:\n            self.query_pos_sine_scale = None\n\n        if rm_dec_query_scale:\n            self.query_scale = None\n        else:\n            raise NotImplementedError\n            self.query_scale = MLP(d_model, d_model, d_model, 2)\n        self.bbox_embed = None\n        self.class_embed = None\n\n        self.d_model = d_model\n        self.modulate_hw_attn = modulate_hw_attn\n        self.deformable_decoder = deformable_decoder\n\n        if not deformable_decoder and modulate_hw_attn:\n            self.ref_anchor_head = MLP(d_model, d_model, 2, 2)\n        else:\n            self.ref_anchor_head = None\n\n        self.decoder_query_perturber = decoder_query_perturber\n        self.box_pred_damping = None\n\n        self.dec_layer_number = dec_layer_number\n        if dec_layer_number is not None:\n            assert isinstance(dec_layer_number, list)\n            assert len(dec_layer_number) == num_layers\n            # assert dec_layer_number[0] == \n            \n        self.dec_layer_dropout_prob = dec_layer_dropout_prob\n        if dec_layer_dropout_prob is not None:\n            assert isinstance(dec_layer_dropout_prob, list)\n            assert len(dec_layer_dropout_prob) == num_layers\n            for i in dec_layer_dropout_prob:\n                assert 0.0 <= i <= 1.0\n\n        self.rm_detach = None\n\n    def forward(self, tgt, memory,\n                tgt_mask: Optional[Tensor] = None,\n                memory_mask: Optional[Tensor] = None,\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                memory_key_padding_mask: Optional[Tensor] = None,\n                pos: Optional[Tensor] = None,\n                refpoints_unsigmoid: Optional[Tensor] = None, # num_queries, bs, 2\n                # for memory\n                level_start_index: Optional[Tensor] = 
None, # num_levels\n                spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                valid_ratios: Optional[Tensor] = None,\n                \n                ):\n        \"\"\"\n        Input:\n            - tgt: nq, bs, d_model\n            - memory: hw, bs, d_model\n            - pos: hw, bs, d_model\n            - refpoints_unsigmoid: nq, bs, 2/4\n            - valid_ratios/spatial_shapes: bs, nlevel, 2\n        \"\"\"\n        output = tgt\n\n        intermediate = []\n        reference_points = refpoints_unsigmoid.sigmoid()\n        ref_points = [reference_points]  \n\n        for layer_id, layer in enumerate(self.layers):\n            # preprocess ref points\n            if self.training and self.decoder_query_perturber is not None and layer_id != 0:\n                reference_points = self.decoder_query_perturber(reference_points)\n\n\n\n            if self.deformable_decoder:\n                if reference_points.shape[-1] == 4:\n                    reference_points_input = reference_points[:, :, None] \\\n                                            * torch.cat([valid_ratios, valid_ratios], -1)[None, :] # nq, bs, nlevel, 4\n                else:\n                    assert reference_points.shape[-1] == 2\n                    reference_points_input = reference_points[:, :, None] * valid_ratios[None, :]\n                query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # nq, bs, 256*2 \n            else:\n                query_sine_embed = gen_sineembed_for_position(reference_points) # nq, bs, 256*2\n                reference_points_input = None\n\n            # conditional query\n            # import ipdb; ipdb.set_trace()\n            raw_query_pos = self.ref_point_head(query_sine_embed) # nq, bs, 256\n            pos_scale = self.query_scale(output) if self.query_scale is not None else 1\n            query_pos = pos_scale * raw_query_pos\n            if not self.deformable_decoder:\n                query_sine_embed = query_sine_embed[..., :self.d_model] * self.query_pos_sine_scale(output)\n\n            # modulated HW attentions\n            if not self.deformable_decoder and self.modulate_hw_attn:\n                refHW_cond = self.ref_anchor_head(output).sigmoid() # nq, bs, 2\n                query_sine_embed[..., self.d_model // 2:] *= (refHW_cond[..., 0] / reference_points[..., 2]).unsqueeze(-1)\n                query_sine_embed[..., :self.d_model // 2] *= (refHW_cond[..., 1] / reference_points[..., 3]).unsqueeze(-1)\n\n            # main process\n            # import ipdb; ipdb.set_trace()\n            dropflag = False\n            if self.dec_layer_dropout_prob is not None:\n                prob = random.random()\n                if prob < self.dec_layer_dropout_prob[layer_id]:\n                    dropflag = True\n            if not dropflag:\n                output = layer(\n                    tgt = output,\n                    tgt_query_pos = query_pos,\n                    tgt_query_sine_embed = query_sine_embed,\n                    tgt_key_padding_mask = tgt_key_padding_mask,\n                    tgt_reference_points = reference_points_input,\n\n                    memory = memory,\n                    memory_key_padding_mask = memory_key_padding_mask,\n                    memory_level_start_index = level_start_index,\n                    memory_spatial_shapes = spatial_shapes,\n                    memory_pos = pos,\n\n                    self_attn_mask = tgt_mask,\n                    cross_attn_mask = 
memory_mask\n                )\n\n            # iter update\n            if self.bbox_embed is not None:\n                # box_holder = self.bbox_embed(output)\n                # box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points)\n                # new_reference_points = box_holder[..., :self.query_dim].sigmoid()\n\n                reference_before_sigmoid = inverse_sigmoid(reference_points)\n                delta_unsig = self.bbox_embed[layer_id](output)\n                outputs_unsig = delta_unsig + reference_before_sigmoid\n                new_reference_points = outputs_unsig.sigmoid()\n\n                # select # ref points\n                if self.dec_layer_number is not None and layer_id != self.num_layers - 1:\n                    # import ipdb; ipdb.set_trace()\n                    nq_now = new_reference_points.shape[0]\n                    select_number = self.dec_layer_number[layer_id + 1]\n                    if nq_now != select_number:\n                        class_unselected = self.class_embed[layer_id](output) # nq, bs, 91\n                        topk_proposals = torch.topk(class_unselected.max(-1)[0], select_number, dim=0)[1] # new_nq, bs\n                        new_reference_points = torch.gather(new_reference_points, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)) # unsigmoid\n\n                if self.rm_detach and 'dec' in self.rm_detach:\n                    reference_points = new_reference_points\n                else:\n                    reference_points = new_reference_points.detach()\n                if self.use_detached_boxes_dec_out:\n                    ref_points.append(reference_points)\n                else:\n                    ref_points.append(new_reference_points)\n\n\n            intermediate.append(self.norm(output))\n            if self.dec_layer_number is not None and layer_id != self.num_layers - 1:\n                if nq_now != select_number:\n                    output = torch.gather(output, 0, topk_proposals.unsqueeze(-1).repeat(1, 1, self.d_model)) # unsigmoid\n\n\n        return [\n            [itm_out.transpose(0, 1) for itm_out in intermediate],\n            [itm_refpoint.transpose(0, 1) for itm_refpoint in ref_points]\n        ]\n\nclass DeformableTransformerEncoderLayer(nn.Module):\n    def __init__(self,\n                 d_model=256, d_ffn=1024,\n                 dropout=0.1, activation=\"relu\",\n                 n_levels=4, n_heads=8, n_points=4,\n                 add_channel_attention=False,\n                 use_deformable_box_attn=False,\n                 box_attn_type='roi_align',\n                 ):\n        super().__init__()\n\n        # self attention\n        if use_deformable_box_attn:\n            self.self_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)\n        else:\n            self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation, d_model=d_ffn)\n        self.dropout2 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout3 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n        # channel attention\n        self.add_channel_attention = add_channel_attention\n        if add_channel_attention:\n            self.activ_channel = _get_activation_fn('dyrelu', 
d_model=d_model)\n            self.norm_channel = nn.LayerNorm(d_model)\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None else tensor + pos\n\n    def forward_ffn(self, src):\n        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n        src = src + self.dropout3(src2)\n        src = self.norm2(src)\n        return src\n\n    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None):\n        # self attention\n        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, key_padding_mask)\n        src = src + self.dropout1(src2)\n        src = self.norm1(src)\n\n        # ffn\n        src = self.forward_ffn(src)\n\n        # channel attn\n        if self.add_channel_attention:\n            src = self.norm_channel(src + self.activ_channel(src))\n\n        return src\n\nclass DeformableTransformerDecoderLayer(nn.Module):\n    def __init__(self, d_model=256, d_ffn=1024,\n                 dropout=0.1, activation=\"relu\",\n                 n_levels=4, n_heads=8, n_points=4,\n                 use_deformable_box_attn=False,\n                 box_attn_type='roi_align',\n                 key_aware_type=None,\n                 decoder_sa_type='ca',\n                 module_seq=['sa', 'ca', 'ffn'],\n                 ):\n        super().__init__()\n        self.module_seq = module_seq\n        assert sorted(module_seq) == ['ca', 'ffn', 'sa']\n\n        # cross attention\n        # self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        if use_deformable_box_attn:\n            self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)\n        else:\n            self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # self attention\n        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n        self.dropout2 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)\n        self.dropout3 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout4 = nn.Dropout(dropout)\n        self.norm3 = nn.LayerNorm(d_model)\n\n        self.key_aware_type = key_aware_type\n        self.key_aware_proj = None\n        self.decoder_sa_type = decoder_sa_type\n        assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']\n\n        if decoder_sa_type == 'ca_content':\n            self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n\n\n\n\n    def rm_self_attn_modules(self):\n        self.self_attn = None\n        self.dropout2 = None\n        self.norm2 = None\n\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None else tensor + pos\n\n    def forward_ffn(self, tgt):\n        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n        tgt = tgt + self.dropout4(tgt2)\n        tgt = self.norm3(tgt)\n        return tgt\n\n    def forward_sa(self,\n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. 
MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n        # self attention\n        if self.self_attn is not None:\n            if self.decoder_sa_type == 'sa':\n                q = k = self.with_pos_embed(tgt, tgt_query_pos)\n                tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            elif self.decoder_sa_type == 'ca_label':\n                bs = tgt.shape[1]\n                k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)\n                tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            elif self.decoder_sa_type == 'ca_content':\n                tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n                            tgt_reference_points.transpose(0, 1).contiguous(),\n                            memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            else:\n                raise NotImplementedError(\"Unknown decoder_sa_type {}\".format(self.decoder_sa_type))\n\n        return tgt\n\n    def forward_ca(self,\n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n        # cross attention\n        if self.key_aware_type is not None:\n\n            if self.key_aware_type == 'mean':\n                tgt = tgt + memory.mean(0, keepdim=True)\n            elif self.key_aware_type == 'proj_mean':\n                tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\n            else:\n                raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\n        tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n                               tgt_reference_points.transpose(0, 1).contiguous(),\n                               memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n        tgt = tgt + self.dropout1(tgt2)\n        tgt = self.norm1(tgt)\n\n        return tgt\n\n    def forward(self,\n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n\n        for funcname in self.module_seq:\n            if funcname == 'ffn':\n                tgt = self.forward_ffn(tgt)\n            elif funcname == 'ca':\n                tgt = self.forward_ca(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n                    tgt_key_padding_mask, tgt_reference_points, \\\n                        memory, memory_key_padding_mask, memory_level_start_index, \\\n                            memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n            elif funcname == 'sa':\n                tgt = self.forward_sa(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n                    tgt_key_padding_mask, tgt_reference_points, \\\n                        memory, memory_key_padding_mask, memory_level_start_index, \\\n                            memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n            else:\n                raise ValueError('unknown funcname {}'.format(funcname))\n\n        return tgt\n\n\ndef _get_clones(module, N, layer_share=False):\n    if layer_share:\n        return nn.ModuleList([module for i in range(N)])\n    else:\n        return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef build_deformable_transformer(args):\n    decoder_query_perturber = None\n    if args.decoder_layer_noise:\n        from .utils import RandomBoxPerturber\n        decoder_query_perturber=RandomBoxPerturber(\n                x_noise_scale=args.dln_xy_noise, y_noise_scale=args.dln_xy_noise, \n                w_noise_scale=args.dln_hw_noise, h_noise_scale=args.dln_hw_noise)\n\n    use_detached_boxes_dec_out = False\n    try:\n        use_detached_boxes_dec_out = args.use_detached_boxes_dec_out\n    except:\n        use_detached_boxes_dec_out =False\n\n    return DeformableTransformer(\n        d_model=args.hidden_dim,\n        dropout=args.dropout,\n        nhead=args.nheads,\n        num_queries=args.num_queries,\n        dim_feedforward=args.dim_feedforward,\n        num_encoder_layers=args.enc_layers,\n        num_unicoder_layers=args.unic_layers,\n        num_decoder_layers=args.dec_layers,\n        normalize_before=args.pre_norm,\n        return_intermediate_dec=True,\n        query_dim=args.query_dim,\n        activation=args.transformer_activation,\n        num_patterns=args.num_patterns,\n        modulate_hw_attn=True,\n\n        deformable_encoder=True,\n        deformable_decoder=True,\n        num_feature_levels=args.num_feature_levels,\n        enc_n_points=args.enc_n_points,\n        dec_n_points=args.dec_n_points,\n        use_deformable_box_attn=args.use_deformable_box_attn,\n        box_attn_type=args.box_attn_type,\n\n        learnable_tgt_init=True,\n        decoder_query_perturber=decoder_query_perturber,\n\n        
add_channel_attention=args.add_channel_attention,\n        add_pos_value=args.add_pos_value,\n        random_refpoints_xy=args.random_refpoints_xy,\n\n        # two stage\n        two_stage_type=args.two_stage_type, # ['no', 'standard', 'early']\n        two_stage_pat_embed=args.two_stage_pat_embed,\n        two_stage_add_query_num=args.two_stage_add_query_num,\n        two_stage_learn_wh=args.two_stage_learn_wh,\n        two_stage_keep_all_tokens=args.two_stage_keep_all_tokens,\n        dec_layer_number=args.dec_layer_number,\n        rm_self_attn_layers=None,\n        key_aware_type=None,\n        layer_share_type=None,\n\n        rm_detach=None,\n        decoder_sa_type=args.decoder_sa_type,\n        module_seq=args.decoder_module_seq,\n\n        embed_init_tgt=args.embed_init_tgt,\n        use_detached_boxes_dec_out=use_detached_boxes_dec_out\n    )\n\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/dino.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR model and criterion classes.\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# ------------------------------------------------------------------------\nimport copy\nimport math\nfrom typing import List\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom torchvision.ops.boxes import nms\n\nfrom .util import box_ops\nfrom .util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n                        accuracy, get_world_size, interpolate,\n                        is_dist_avail_and_initialized, inverse_sigmoid)\n\nfrom .backbone import build_backbone\nfrom .matcher import build_matcher\nfrom .segmentation import (dice_loss)\nfrom .deformable_transformer import build_deformable_transformer\nfrom .utils import sigmoid_focal_loss, MLP\n\nfrom .dn_components import prepare_for_cdn, dn_post_process\n\n\nclass DINO(nn.Module):\n    \"\"\" This is the Cross-Attention Detector module that performs object detection \"\"\"\n\n    def __init__(self, backbone, transformer, num_classes, num_queries,\n                 aux_loss=False, iter_update=False,\n                 query_dim=2,\n                 random_refpoints_xy=False,\n                 fix_refpoints_hw=-1,\n                 num_feature_levels=1,\n                 nheads=8,\n                 # two stage\n                 two_stage_type='no',  # ['no', 'standard']\n                 two_stage_add_query_num=0,\n                 dec_pred_class_embed_share=True,\n                 dec_pred_bbox_embed_share=True,\n                 two_stage_class_embed_share=True,\n                 two_stage_bbox_embed_share=True,\n                 decoder_sa_type='sa',\n                 num_patterns=0,\n                 dn_number=100,\n                 dn_box_noise_scale=0.4,\n                 dn_label_noise_ratio=0.5,\n                 dn_labelbook_size=100,\n                 ):\n        \"\"\" Initializes the model.\n        Parameters:\n            backbone: torch module of the backbone to be used. See backbone.py\n            transformer: torch module of the transformer architecture. See transformer.py\n            num_classes: number of object classes\n            num_queries: number of object queries, ie detection slot. This is the maximal number of objects\n                         Conditional DETR can detect in a single image. 
For COCO, we recommend 100 queries.\n            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n\n            fix_refpoints_hw: -1(default): learn w and h for each box seperately\n                                >0 : given fixed number\n                                -2 : learn a shared w and h\n        \"\"\"\n        super().__init__()\n        self.num_queries = num_queries\n        self.transformer = transformer\n        self.num_classes = num_classes\n        self.hidden_dim = hidden_dim = transformer.d_model\n        self.num_feature_levels = num_feature_levels\n        self.nheads = nheads\n        self.label_enc = nn.Embedding(dn_labelbook_size + 1, hidden_dim)\n\n        # setting query dim\n        self.query_dim = query_dim\n        assert query_dim == 4\n        self.random_refpoints_xy = random_refpoints_xy\n        self.fix_refpoints_hw = fix_refpoints_hw\n\n        # for dn training\n        self.num_patterns = num_patterns\n        self.dn_number = dn_number\n        self.dn_box_noise_scale = dn_box_noise_scale\n        self.dn_label_noise_ratio = dn_label_noise_ratio\n        self.dn_labelbook_size = dn_labelbook_size\n\n        # prepare input projection layers\n        if num_feature_levels > 1:\n            num_backbone_outs = len(backbone.num_channels)\n            input_proj_list = []\n            for _ in range(num_backbone_outs):\n                in_channels = backbone.num_channels[_]\n                input_proj_list.append(nn.Sequential(\n                    nn.Conv2d(in_channels, hidden_dim, kernel_size=1),\n                    nn.GroupNorm(32, hidden_dim),\n                ))\n            for _ in range(num_feature_levels - num_backbone_outs):\n                input_proj_list.append(nn.Sequential(\n                    nn.Conv2d(in_channels, hidden_dim, kernel_size=3, stride=2, padding=1),\n                    nn.GroupNorm(32, hidden_dim),\n                ))\n                in_channels = hidden_dim\n            self.input_proj = nn.ModuleList(input_proj_list)\n        else:\n            assert two_stage_type == 'no', \"two_stage_type should be no if num_feature_levels=1 !!!\"\n            self.input_proj = nn.ModuleList([\n                nn.Sequential(\n                    nn.Conv2d(backbone.num_channels[-1], hidden_dim, kernel_size=1),\n                    nn.GroupNorm(32, hidden_dim),\n                )])\n\n        self.backbone = backbone\n        self.aux_loss = aux_loss\n        self.box_pred_damping = box_pred_damping = None\n\n        self.iter_update = iter_update\n        assert iter_update, \"Why not iter_update?\"\n\n        # prepare pred layers\n        self.dec_pred_class_embed_share = dec_pred_class_embed_share\n        self.dec_pred_bbox_embed_share = dec_pred_bbox_embed_share\n        # prepare class & box embed\n        _class_embed = nn.Linear(hidden_dim, num_classes)\n        _bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n        # init the two embed layers\n        prior_prob = 0.01\n        bias_value = -math.log((1 - prior_prob) / prior_prob)\n        _class_embed.bias.data = torch.ones(self.num_classes) * bias_value\n        nn.init.constant_(_bbox_embed.layers[-1].weight.data, 0)\n        nn.init.constant_(_bbox_embed.layers[-1].bias.data, 0)\n\n        if dec_pred_bbox_embed_share:\n            box_embed_layerlist = [_bbox_embed for i in range(transformer.num_decoder_layers)]\n        else:\n            box_embed_layerlist = [copy.deepcopy(_bbox_embed) for i in 
range(transformer.num_decoder_layers)]\n        if dec_pred_class_embed_share:\n            class_embed_layerlist = [_class_embed for i in range(transformer.num_decoder_layers)]\n        else:\n            class_embed_layerlist = [copy.deepcopy(_class_embed) for i in range(transformer.num_decoder_layers)]\n        self.bbox_embed = nn.ModuleList(box_embed_layerlist)\n        self.class_embed = nn.ModuleList(class_embed_layerlist)\n        self.transformer.decoder.bbox_embed = self.bbox_embed\n        self.transformer.decoder.class_embed = self.class_embed\n\n        # two stage\n        self.two_stage_type = two_stage_type\n        self.two_stage_add_query_num = two_stage_add_query_num\n        assert two_stage_type in ['no', 'standard'], \"unknown param {} of two_stage_type\".format(two_stage_type)\n        if two_stage_type != 'no':\n            if two_stage_bbox_embed_share:\n                assert dec_pred_class_embed_share and dec_pred_bbox_embed_share\n                self.transformer.enc_out_bbox_embed = _bbox_embed\n            else:\n                self.transformer.enc_out_bbox_embed = copy.deepcopy(_bbox_embed)\n\n            if two_stage_class_embed_share:\n                assert dec_pred_class_embed_share and dec_pred_bbox_embed_share\n                self.transformer.enc_out_class_embed = _class_embed\n            else:\n                self.transformer.enc_out_class_embed = copy.deepcopy(_class_embed)\n\n            self.refpoint_embed = None\n            if self.two_stage_add_query_num > 0:\n                self.init_ref_points(two_stage_add_query_num)\n\n        self.decoder_sa_type = decoder_sa_type\n        assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']\n        # self.replace_sa_with_double_ca = replace_sa_with_double_ca\n        if decoder_sa_type == 'ca_label':\n            self.label_embedding = nn.Embedding(num_classes, hidden_dim)\n            for layer in self.transformer.decoder.layers:\n                layer.label_embedding = self.label_embedding\n        else:\n            for layer in self.transformer.decoder.layers:\n                layer.label_embedding = None\n            self.label_embedding = None\n\n        self._reset_parameters()\n\n    def _reset_parameters(self):\n        # init input_proj\n        for proj in self.input_proj:\n            nn.init.xavier_uniform_(proj[0].weight, gain=1)\n            nn.init.constant_(proj[0].bias, 0)\n\n    def init_ref_points(self, use_num_queries):\n        self.refpoint_embed = nn.Embedding(use_num_queries, self.query_dim)\n\n        if self.random_refpoints_xy:\n            # import ipdb; ipdb.set_trace()\n            self.refpoint_embed.weight.data[:, :2].uniform_(0, 1)\n            self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])\n            self.refpoint_embed.weight.data[:, :2].requires_grad = False\n\n        if self.fix_refpoints_hw > 0:\n            print(\"fix_refpoints_hw: {}\".format(self.fix_refpoints_hw))\n            assert self.random_refpoints_xy\n            self.refpoint_embed.weight.data[:, 2:] = self.fix_refpoints_hw\n            self.refpoint_embed.weight.data[:, 2:] = inverse_sigmoid(self.refpoint_embed.weight.data[:, 2:])\n            self.refpoint_embed.weight.data[:, 2:].requires_grad = False\n        elif int(self.fix_refpoints_hw) == -1:\n            pass\n        elif int(self.fix_refpoints_hw) == -2:\n            print('learn a shared h and w')\n            assert self.random_refpoints_xy\n            self.refpoint_embed = 
nn.Embedding(use_num_queries, 2)\n            self.refpoint_embed.weight.data[:, :2].uniform_(0, 1)\n            self.refpoint_embed.weight.data[:, :2] = inverse_sigmoid(self.refpoint_embed.weight.data[:, :2])\n            self.refpoint_embed.weight.data[:, :2].requires_grad = False\n            self.hw_embed = nn.Embedding(1, 1)\n        else:\n            raise NotImplementedError('Unknown fix_refpoints_hw {}'.format(self.fix_refpoints_hw))\n\n    def forward(self, samples: NestedTensor, targets: List = None):\n        \"\"\" The forward expects a NestedTensor, which consists of:\n               - samples.tensor: batched images, of shape [batch_size x 3 x H x W]\n               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n\n            It returns a dict with the following elements:\n               - \"pred_logits\": the classification logits (including no-object) for all queries.\n                                Shape= [batch_size x num_queries x num_classes]\n               - \"pred_boxes\": The normalized boxes coordinates for all queries, represented as\n                               (center_x, center_y, width, height). These values are normalized in [0, 1],\n                               relative to the size of each individual image (disregarding possible padding).\n                               See PostProcess for information on how to retrieve the unnormalized bounding box.\n               - \"aux_outputs\": Optional, only returned when auxilary losses are activated. It is a list of\n                                dictionnaries containing the two above keys for each decoder layer.\n        \"\"\"\n        if isinstance(samples, (list, torch.Tensor)):\n            samples = nested_tensor_from_tensor_list(samples)\n        features, poss = self.backbone(samples)\n\n        srcs = []\n        masks = []\n        for l, feat in enumerate(features):\n            src, mask = feat.decompose()\n            srcs.append(self.input_proj[l](src))\n            masks.append(mask)\n            assert mask is not None\n        if self.num_feature_levels > len(srcs):\n            _len_srcs = len(srcs)\n            for l in range(_len_srcs, self.num_feature_levels):\n                if l == _len_srcs:\n                    src = self.input_proj[l](features[-1].tensors)\n                else:\n                    src = self.input_proj[l](srcs[-1])\n                m = samples.mask\n                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(torch.bool)[0]\n                pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n                srcs.append(src)\n                masks.append(mask)\n                poss.append(pos_l)\n\n        if self.dn_number > 0 or targets is not None:\n            input_query_label, input_query_bbox, attn_mask, dn_meta = \\\n                prepare_for_cdn(dn_args=(targets, self.dn_number, self.dn_label_noise_ratio, self.dn_box_noise_scale),\n                                training=self.training, num_queries=self.num_queries, num_classes=self.num_classes,\n                                hidden_dim=self.hidden_dim, label_enc=self.label_enc)\n        else:\n            assert targets is None\n            input_query_bbox = input_query_label = attn_mask = dn_meta = None\n\n        hs, reference, hs_enc, ref_enc, init_box_proposal = self.transformer(srcs, masks, input_query_bbox, poss,\n                                                                             input_query_label, 
attn_mask)\n        # In case num object=0\n        hs[0] += self.label_enc.weight[0, 0] * 0.0\n\n        # deformable-detr-like anchor update\n        # reference_before_sigmoid = inverse_sigmoid(reference[:-1]) # n_dec, bs, nq, 4\n        outputs_coord_list = []\n        for dec_lid, (layer_ref_sig, layer_bbox_embed, layer_hs) in enumerate(zip(reference[:-1], self.bbox_embed, hs)):\n            layer_delta_unsig = layer_bbox_embed(layer_hs)\n            layer_outputs_unsig = layer_delta_unsig + inverse_sigmoid(layer_ref_sig)\n            layer_outputs_unsig = layer_outputs_unsig.sigmoid()\n            outputs_coord_list.append(layer_outputs_unsig)\n        outputs_coord_list = torch.stack(outputs_coord_list)\n\n        # outputs_class = self.class_embed(hs)\n        outputs_class = torch.stack([layer_cls_embed(layer_hs) for\n                                     layer_cls_embed, layer_hs in zip(self.class_embed, hs)])\n        if self.dn_number > 0 and dn_meta is not None:\n            outputs_class, outputs_coord_list = \\\n                dn_post_process(outputs_class, outputs_coord_list,\n                                dn_meta, self.aux_loss, self._set_aux_loss)\n        out = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord_list[-1]}\n        if self.aux_loss:\n            out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_coord_list)\n\n        # for encoder output\n        if hs_enc is not None:\n            # prepare intermediate outputs\n            interm_coord = ref_enc[-1]\n            interm_class = self.transformer.enc_out_class_embed(hs_enc[-1])\n            out['interm_outputs'] = {'pred_logits': interm_class, 'pred_boxes': interm_coord}\n            out['interm_outputs_for_matching_pre'] = {'pred_logits': interm_class, 'pred_boxes': init_box_proposal}\n\n            # prepare enc outputs\n            # import ipdb; ipdb.set_trace()\n            if hs_enc.shape[0] > 1:\n                enc_outputs_coord = []\n                enc_outputs_class = []\n                for layer_id, (layer_box_embed, layer_class_embed, layer_hs_enc, layer_ref_enc) in enumerate(\n                        zip(self.enc_bbox_embed, self.enc_class_embed, hs_enc[:-1], ref_enc[:-1])):\n                    layer_enc_delta_unsig = layer_box_embed(layer_hs_enc)\n                    layer_enc_outputs_coord_unsig = layer_enc_delta_unsig + inverse_sigmoid(layer_ref_enc)\n                    layer_enc_outputs_coord = layer_enc_outputs_coord_unsig.sigmoid()\n\n                    layer_enc_outputs_class = layer_class_embed(layer_hs_enc)\n                    enc_outputs_coord.append(layer_enc_outputs_coord)\n                    enc_outputs_class.append(layer_enc_outputs_class)\n\n                # enc_delta_unsig = self.enc_bbox_embed(hs_enc[:-1])\n                # enc_outputs_unsig = enc_delta_unsig + ref_enc[:-1]\n                # enc_outputs_coord = enc_outputs_unsig.sigmoid()\n                # enc_outputs_class = self.enc_class_embed(hs_enc[:-1])\n                out['enc_outputs'] = [\n                    {'pred_logits': a, 'pred_boxes': b} for a, b in zip(enc_outputs_class, enc_outputs_coord)\n                ]\n\n        out['dn_meta'] = dn_meta\n\n        return out\n\n    @torch.jit.unused\n    def _set_aux_loss(self, outputs_class, outputs_coord):\n        # this is a workaround to make torchscript happy, as torchscript\n        # doesn't support dictionary with non-homogeneous values, such\n        # as a dict having both a Tensor and a list.\n        return 
[{'pred_logits': a, 'pred_boxes': b}\n                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])]\n\n\nclass SetCriterion(nn.Module):\n    \"\"\" This class computes the loss for Conditional DETR.\n    The process happens in two steps:\n        1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n    \"\"\"\n\n    def __init__(self, num_classes, matcher, weight_dict, focal_alpha, losses):\n        \"\"\" Create the criterion.\n        Parameters:\n            num_classes: number of object categories, omitting the special no-object category\n            matcher: module able to compute a matching between targets and proposals\n            weight_dict: dict containing as key the names of the losses and as values their relative weight.\n            losses: list of all the losses to be applied. See get_loss for list of available losses.\n            focal_alpha: alpha in Focal Loss\n        \"\"\"\n        super().__init__()\n        self.num_classes = num_classes\n        self.matcher = matcher\n        self.weight_dict = weight_dict\n        self.losses = losses\n        self.focal_alpha = focal_alpha\n\n    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n        \"\"\"Classification loss (Binary focal loss)\n        targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n        \"\"\"\n        assert 'pred_logits' in outputs\n        src_logits = outputs['pred_logits']\n\n        idx = self._get_src_permutation_idx(indices)\n        target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n        target_classes = torch.full(src_logits.shape[:2], self.num_classes,\n                                    dtype=torch.int64, device=src_logits.device)\n        target_classes[idx] = target_classes_o\n\n        target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n                                            dtype=src_logits.dtype, layout=src_logits.layout, device=src_logits.device)\n        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n        target_classes_onehot = target_classes_onehot[:, :, :-1]\n        loss_ce = sigmoid_focal_loss(src_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * \\\n                  src_logits.shape[1]\n        losses = {'loss_ce': loss_ce}\n\n        if log:\n            # TODO this should probably be a separate loss, not hacked in this one here\n            losses['class_error'] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n        return losses\n\n    @torch.no_grad()\n    def loss_cardinality(self, outputs, targets, indices, num_boxes):\n        \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n        This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients\n        \"\"\"\n        pred_logits = outputs['pred_logits']\n        device = pred_logits.device\n        tgt_lengths = torch.as_tensor([len(v[\"labels\"]) for v in targets], device=device)\n        # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n        losses = {'cardinality_error': card_err}\n        return losses\n\n    def loss_boxes(self, outputs, targets, indices, num_boxes):\n        \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n           targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.\n        \"\"\"\n        assert 'pred_boxes' in outputs\n        idx = self._get_src_permutation_idx(indices)\n        src_boxes = outputs['pred_boxes'][idx]\n        target_boxes = torch.cat([t['boxes'][i] for t, (_, i) in zip(targets, indices)], dim=0)\n\n        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction='none')\n\n        losses = {}\n        losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n\n        loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(\n            box_ops.box_cxcywh_to_xyxy(src_boxes),\n            box_ops.box_cxcywh_to_xyxy(target_boxes)))\n        losses['loss_giou'] = loss_giou.sum() / num_boxes\n\n        # calculate the x,y and h,w loss\n        with torch.no_grad():\n            losses['loss_xy'] = loss_bbox[..., :2].sum() / num_boxes\n            losses['loss_hw'] = loss_bbox[..., 2:].sum() / num_boxes\n\n        return losses\n\n    def loss_masks(self, outputs, targets, indices, num_boxes):\n        \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n           targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n        \"\"\"\n        assert \"pred_masks\" in outputs\n\n        src_idx = self._get_src_permutation_idx(indices)\n        tgt_idx = self._get_tgt_permutation_idx(indices)\n        src_masks = outputs[\"pred_masks\"]\n        src_masks = src_masks[src_idx]\n        masks = [t[\"masks\"] for t in targets]\n        # TODO use valid to mask invalid areas due to padding in loss\n        target_masks, valid = nested_tensor_from_tensor_list(masks).decompose()\n        target_masks = target_masks.to(src_masks)\n        target_masks = target_masks[tgt_idx]\n\n        # upsample predictions to the target size\n        src_masks = interpolate(src_masks[:, None], size=target_masks.shape[-2:],\n                                mode=\"bilinear\", align_corners=False)\n        src_masks = src_masks[:, 0].flatten(1)\n\n        target_masks = target_masks.flatten(1)\n        target_masks = target_masks.view(src_masks.shape)\n        losses = {\n            \"loss_mask\": sigmoid_focal_loss(src_masks, target_masks, num_boxes),\n            \"loss_dice\": dice_loss(src_masks, target_masks, num_boxes),\n        }\n        return losses\n\n    def _get_src_permutation_idx(self, indices):\n        # permute predictions following indices\n        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])\n        src_idx = torch.cat([src for (src, _) in indices])\n        return batch_idx, src_idx\n\n    def 
_get_tgt_permutation_idx(self, indices):\n        # permute targets following indices\n        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])\n        tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n        return batch_idx, tgt_idx\n\n    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n        loss_map = {\n            'labels': self.loss_labels,\n            'cardinality': self.loss_cardinality,\n            'boxes': self.loss_boxes,\n            'masks': self.loss_masks,\n            # 'dn_labels': self.loss_dn_labels,\n            # 'dn_boxes': self.loss_dn_boxes\n        }\n        assert loss in loss_map, f'do you really want to compute {loss} loss?'\n        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n    def forward(self, outputs, targets, return_indices=False):\n        \"\"\" This performs the loss computation.\n        Parameters:\n             outputs: dict of tensors, see the output specification of the model for the format\n             targets: list of dicts, such that len(targets) == batch_size.\n                      The expected keys in each dict depends on the losses applied, see each loss' doc\n            \n             return_indices: used for vis. if True, the layer0-5 indices will be returned as well.\n\n        \"\"\"\n        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}\n        device = next(iter(outputs.values())).device\n        indices = self.matcher(outputs_without_aux, targets)\n\n        if return_indices:\n            indices0_copy = indices\n            indices_list = []\n\n        # Compute the average number of target boxes accross all nodes, for normalization purposes\n        num_boxes = sum(len(t[\"labels\"]) for t in targets)\n        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=device)\n        if is_dist_avail_and_initialized():\n            torch.distributed.all_reduce(num_boxes)\n        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n        # Compute all the requested losses\n        losses = {}\n\n        # prepare for dn loss\n        dn_meta = outputs['dn_meta']\n\n        if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:\n            output_known_lbs_bboxes, single_pad, scalar = self.prep_for_dn(dn_meta)\n\n            dn_pos_idx = []\n            dn_neg_idx = []\n            for i in range(len(targets)):\n                if len(targets[i]['labels']) > 0:\n                    t = torch.range(0, len(targets[i]['labels']) - 1).long().cuda()\n                    t = t.unsqueeze(0).repeat(scalar, 1)\n                    tgt_idx = t.flatten()\n                    output_idx = (torch.tensor(range(scalar)) * single_pad).long().cuda().unsqueeze(1) + t\n                    output_idx = output_idx.flatten()\n                else:\n                    output_idx = tgt_idx = torch.tensor([]).long().cuda()\n\n                dn_pos_idx.append((output_idx, tgt_idx))\n                dn_neg_idx.append((output_idx + single_pad // 2, tgt_idx))\n\n            output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes']\n            l_dict = {}\n            for loss in self.losses:\n                kwargs = {}\n                if 'labels' in loss:\n                    kwargs = {'log': False}\n                l_dict.update(\n                    self.get_loss(loss, output_known_lbs_bboxes, targets, dn_pos_idx, num_boxes * scalar, **kwargs))\n\n            l_dict = 
{k + f'_dn': v for k, v in l_dict.items()}\n            losses.update(l_dict)\n        else:\n            l_dict = dict()\n            l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')\n            l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')\n            l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')\n            l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')\n            l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')\n            l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda')\n            losses.update(l_dict)\n\n        for loss in self.losses:\n            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))\n\n        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n        if 'aux_outputs' in outputs:\n            for idx, aux_outputs in enumerate(outputs['aux_outputs']):\n                indices = self.matcher(aux_outputs, targets)\n                if return_indices:\n                    indices_list.append(indices)\n                for loss in self.losses:\n                    if loss == 'masks':\n                        # Intermediate masks losses are too costly to compute, we ignore them.\n                        continue\n                    kwargs = {}\n                    if loss == 'labels':\n                        # Logging is enabled only for the last layer\n                        kwargs = {'log': False}\n                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n                    l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n\n                if self.training and dn_meta and 'output_known_lbs_bboxes' in dn_meta:\n                    aux_outputs_known = output_known_lbs_bboxes['aux_outputs'][idx]\n                    l_dict = {}\n                    for loss in self.losses:\n                        kwargs = {}\n                        if 'labels' in loss:\n                            kwargs = {'log': False}\n\n                        l_dict.update(self.get_loss(loss, aux_outputs_known, targets, dn_pos_idx, num_boxes * scalar,\n                                                    **kwargs))\n\n                    l_dict = {k + f'_dn_{idx}': v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n                else:\n                    l_dict = dict()\n                    l_dict['loss_bbox_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict['loss_giou_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict['loss_ce_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict['loss_xy_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict['loss_hw_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict['cardinality_error_dn'] = torch.as_tensor(0.).to('cuda')\n                    l_dict = {k + f'_{idx}': v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n\n        # interm_outputs loss\n        if 'interm_outputs' in outputs:\n            interm_outputs = outputs['interm_outputs']\n            indices = self.matcher(interm_outputs, targets)\n            if return_indices:\n                indices_list.append(indices)\n            for loss in self.losses:\n                if loss == 'masks':\n                    # Intermediate masks losses are too costly to compute, we ignore them.\n                    continue\n                kwargs = {}\n                if loss == 
'labels':\n                    # Logging is enabled only for the last layer\n                    kwargs = {'log': False}\n                l_dict = self.get_loss(loss, interm_outputs, targets, indices, num_boxes, **kwargs)\n                l_dict = {k + f'_interm': v for k, v in l_dict.items()}\n                losses.update(l_dict)\n\n        # enc output loss\n        if 'enc_outputs' in outputs:\n            for i, enc_outputs in enumerate(outputs['enc_outputs']):\n                indices = self.matcher(enc_outputs, targets)\n                if return_indices:\n                    indices_list.append(indices)\n                for loss in self.losses:\n                    if loss == 'masks':\n                        # Intermediate masks losses are too costly to compute, we ignore them.\n                        continue\n                    kwargs = {}\n                    if loss == 'labels':\n                        # Logging is enabled only for the last layer\n                        kwargs = {'log': False}\n                    l_dict = self.get_loss(loss, enc_outputs, targets, indices, num_boxes, **kwargs)\n                    l_dict = {k + f'_enc_{i}': v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n\n        if return_indices:\n            indices_list.append(indices0_copy)\n            return losses, indices_list\n\n        return losses\n\n    def prep_for_dn(self, dn_meta):\n        output_known_lbs_bboxes = dn_meta['output_known_lbs_bboxes']\n        num_dn_groups, pad_size = dn_meta['num_dn_group'], dn_meta['pad_size']\n        assert pad_size % num_dn_groups == 0\n        single_pad = pad_size // num_dn_groups\n\n        return output_known_lbs_bboxes, single_pad, num_dn_groups\n\n\nclass PostProcess(nn.Module):\n    \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n\n    def __init__(self, num_select=100, nms_iou_threshold=-1) -> None:\n        super().__init__()\n        self.num_select = num_select\n        self.nms_iou_threshold = nms_iou_threshold\n\n    @torch.no_grad()\n    def forward(self, outputs, target_sizes, not_to_xyxy=False, test=False):\n        \"\"\" Perform the computation\n        Parameters:\n            outputs: raw outputs of the model\n            target_sizes: tensor of dimension [batch_size x 2] containing the size of each image of the batch\n                          For evaluation, this must be the original image size (before any data augmentation)\n                          For visualization, this should be the image size after data augmentation, but before padding\n        \"\"\"\n        num_select = self.num_select\n        out_logits, out_bbox = outputs['pred_logits'], outputs['pred_boxes']\n\n        assert len(out_logits) == len(target_sizes)\n        assert target_sizes.shape[1] == 2\n\n        prob = out_logits.sigmoid()\n        topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), num_select, dim=1)\n        scores = topk_values\n        topk_boxes = topk_indexes // out_logits.shape[2]\n        labels = topk_indexes % out_logits.shape[2]\n        if not_to_xyxy:\n            boxes = out_bbox\n        else:\n            boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n\n        if test:\n            assert not not_to_xyxy\n            boxes[:, :, 2:] = boxes[:, :, 2:] - boxes[:, :, :2]\n        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n        # and from relative [0, 1] to absolute [0, height] coordinates\n        img_h, 
img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n        boxes = boxes * scale_fct[:, None, :]\n\n        if self.nms_iou_threshold > 0:\n            item_indices = [nms(b, s, iou_threshold=self.nms_iou_threshold) for b, s in zip(boxes, scores)]\n            # import ipdb; ipdb.set_trace()\n            results = [{'scores': s[i], 'labels': l[i], 'boxes': b[i]} for s, l, b, i in\n                       zip(scores, labels, boxes, item_indices)]\n        else:\n            results = [{'scores': s, 'labels': l, 'boxes': b} for s, l, b in zip(scores, labels, boxes)]\n\n        return results\n\n\ndef build_dino(args):\n    # the `num_classes` naming here is somewhat misleading.\n    # it indeed corresponds to `max_obj_id + 1`, where max_obj_id\n    # is the maximum id for a class in your dataset. For example,\n    # COCO has a max_obj_id of 90, so we pass `num_classes` to be 91.\n    # As another example, for a dataset that has a single class with id 1,\n    # you should pass `num_classes` to be 2 (max_obj_id + 1).\n    # For more details on this, check the following discussion\n    # https://github.com/facebookresearch/detr/issues/108#issuecomment-650269223\n    # num_classes = 20 if args.dataset_file != 'coco' else 91\n    # if args.dataset_file == \"coco_panoptic\":\n    #     # for panoptic, we just add a num_classes that is large enough to hold\n    #     # max_obj_id + 1, but the exact value doesn't really matter\n    #     num_classes = 250\n    # if args.dataset_file == 'o365':\n    #     num_classes = 366\n    # if args.dataset_file == 'vanke':\n    #     num_classes = 51\n    num_classes = args.num_classes\n\n    backbone = build_backbone(args)\n\n    transformer = build_deformable_transformer(args)\n\n    try:\n        match_unstable_error = args.match_unstable_error\n        dn_labelbook_size = args.dn_labelbook_size\n    except:\n        match_unstable_error = True\n        dn_labelbook_size = num_classes\n\n    try:\n        dec_pred_class_embed_share = args.dec_pred_class_embed_share\n    except:\n        dec_pred_class_embed_share = True\n    try:\n        dec_pred_bbox_embed_share = args.dec_pred_bbox_embed_share\n    except:\n        dec_pred_bbox_embed_share = True\n\n    model = DINO(\n        backbone,\n        transformer,\n        num_classes=num_classes,\n        num_queries=args.num_queries,\n        aux_loss=True,\n        iter_update=True,\n        query_dim=4,\n        random_refpoints_xy=args.random_refpoints_xy,\n        fix_refpoints_hw=args.fix_refpoints_hw,\n        num_feature_levels=args.num_feature_levels,\n        nheads=args.nheads,\n        dec_pred_class_embed_share=dec_pred_class_embed_share,\n        dec_pred_bbox_embed_share=dec_pred_bbox_embed_share,\n        # two stage\n        two_stage_type=args.two_stage_type,\n        # box_share\n        two_stage_bbox_embed_share=args.two_stage_bbox_embed_share,\n        two_stage_class_embed_share=args.two_stage_class_embed_share,\n        decoder_sa_type=args.decoder_sa_type,\n        num_patterns=args.num_patterns,\n        dn_number=args.dn_number if args.use_dn else 0,\n        dn_box_noise_scale=args.dn_box_noise_scale,\n        dn_label_noise_ratio=args.dn_label_noise_ratio,\n        dn_labelbook_size=dn_labelbook_size,\n    )\n    matcher = build_matcher(args)\n\n    # prepare weight dict\n    box_postprocessor = PostProcess(num_select=args.num_select, nms_iou_threshold=args.nms_iou_threshold)\n\n    return model, matcher, box_postprocessor\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/dn_components.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# DN-DETR\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n\n\nimport torch\nfrom .util.misc import (NestedTensor, nested_tensor_from_tensor_list,\n                        accuracy, get_world_size, interpolate,\n                        is_dist_avail_and_initialized, inverse_sigmoid)\n# from .DABDETR import sigmoid_focal_loss\nfrom .util import box_ops\nimport torch.nn.functional as F\n\n\ndef prepare_for_cdn(dn_args, training, num_queries, num_classes, hidden_dim, label_enc):\n    \"\"\"\n        A major difference of DINO from DN-DETR is that the author processes the pattern embedding in its detector\n        forward function and uses a learnable tgt embedding, so we change this function a little bit.\n        :param dn_args: targets, dn_number, label_noise_ratio, box_noise_scale\n        :param training: if it is training or inference\n        :param num_queries: number of queries\n        :param num_classes: number of classes\n        :param hidden_dim: transformer hidden dim\n        :param label_enc: encode labels in dn\n        :return:\n        \"\"\"\n    if training:\n        targets, dn_number, label_noise_ratio, box_noise_scale = dn_args\n        # positive and negative dn queries\n        dn_number = dn_number * 2\n        known = [(torch.ones_like(t['labels'])).cuda() for t in targets]\n        batch_size = len(known)\n        known_num = [sum(k) for k in known]\n        if int(max(known_num)) == 0:\n            dn_number = 1\n        else:\n            if dn_number >= 100:\n                dn_number = dn_number // (int(max(known_num) * 2))\n            elif dn_number < 1:\n                dn_number = 1\n        if dn_number == 0:\n            dn_number = 1\n        unmask_bbox = unmask_label = torch.cat(known)\n        labels = torch.cat([t['labels'] for t in targets])\n        boxes = torch.cat([t['boxes'] for t in targets])\n        batch_idx = torch.cat([torch.full_like(t['labels'].long(), i) for i, t in enumerate(targets)])\n\n        known_indice = torch.nonzero(unmask_label + unmask_bbox)\n        known_indice = known_indice.view(-1)\n\n        known_indice = known_indice.repeat(2 * dn_number, 1).view(-1)\n        known_labels = labels.repeat(2 * dn_number, 1).view(-1)\n        known_bid = batch_idx.repeat(2 * dn_number, 1).view(-1)\n        known_bboxs = boxes.repeat(2 * dn_number, 1)\n        known_labels_expaned = known_labels.clone()\n        known_bbox_expand = known_bboxs.clone()\n\n        if label_noise_ratio > 0:\n            p = torch.rand_like(known_labels_expaned.float())\n            chosen_indice = torch.nonzero(p < (label_noise_ratio * 0.5)).view(-1)  # half of bbox prob\n            new_label = torch.randint_like(chosen_indice, 0, num_classes)  # randomly put a new one here\n            known_labels_expaned.scatter_(0, chosen_indice, new_label)\n        single_pad = int(max(known_num))\n\n        pad_size = int(single_pad * 2 * dn_number)\n        positive_idx = torch.tensor(range(len(boxes))).long().cuda().unsqueeze(0).repeat(dn_number, 1)\n        positive_idx += (torch.tensor(range(dn_number)) * len(boxes) * 2).long().cuda().unsqueeze(1)\n        positive_idx = 
positive_idx.flatten()\n        negative_idx = positive_idx + len(boxes)\n        if box_noise_scale > 0:\n            known_bbox_ = torch.zeros_like(known_bboxs)\n            known_bbox_[:, :2] = known_bboxs[:, :2] - known_bboxs[:, 2:] / 2\n            known_bbox_[:, 2:] = known_bboxs[:, :2] + known_bboxs[:, 2:] / 2\n\n            diff = torch.zeros_like(known_bboxs)\n            diff[:, :2] = known_bboxs[:, 2:] / 2\n            diff[:, 2:] = known_bboxs[:, 2:] / 2\n\n            rand_sign = torch.randint_like(known_bboxs, low=0, high=2, dtype=torch.float32) * 2.0 - 1.0\n            rand_part = torch.rand_like(known_bboxs)\n            rand_part[negative_idx] += 1.0\n            rand_part *= rand_sign\n            known_bbox_ = known_bbox_ + torch.mul(rand_part,\n                                                  diff).cuda() * box_noise_scale\n            known_bbox_ = known_bbox_.clamp(min=0.0, max=1.0)\n            known_bbox_expand[:, :2] = (known_bbox_[:, :2] + known_bbox_[:, 2:]) / 2\n            known_bbox_expand[:, 2:] = known_bbox_[:, 2:] - known_bbox_[:, :2]\n\n        m = known_labels_expaned.long().to('cuda')\n        input_label_embed = label_enc(m)\n        input_bbox_embed = inverse_sigmoid(known_bbox_expand)\n\n        padding_label = torch.zeros(pad_size, hidden_dim).cuda()\n        padding_bbox = torch.zeros(pad_size, 4).cuda()\n\n        input_query_label = padding_label.repeat(batch_size, 1, 1)\n        input_query_bbox = padding_bbox.repeat(batch_size, 1, 1)\n\n        map_known_indice = torch.tensor([]).to('cuda')\n        if len(known_num):\n            map_known_indice = torch.cat([torch.tensor(range(num)) for num in known_num])  # [1,2, 1,2,3]\n            map_known_indice = torch.cat([map_known_indice + single_pad * i for i in range(2 * dn_number)]).long()\n        if len(known_bid):\n            input_query_label[(known_bid.long(), map_known_indice)] = input_label_embed\n            input_query_bbox[(known_bid.long(), map_known_indice)] = input_bbox_embed\n\n        tgt_size = pad_size + num_queries\n        attn_mask = torch.ones(tgt_size, tgt_size).to('cuda') < 0\n        # match query cannot see the reconstruct\n        attn_mask[pad_size:, :pad_size] = True\n        # reconstruct cannot see each other\n        for i in range(dn_number):\n            if i == 0:\n                attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n            if i == dn_number - 1:\n                attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * i * 2] = True\n            else:\n                attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), single_pad * 2 * (i + 1):pad_size] = True\n                attn_mask[single_pad * 2 * i:single_pad * 2 * (i + 1), :single_pad * 2 * i] = True\n\n        dn_meta = {\n            'pad_size': pad_size,\n            'num_dn_group': dn_number,\n        }\n    else:\n\n        input_query_label = None\n        input_query_bbox = None\n        attn_mask = None\n        dn_meta = None\n\n    return input_query_label, input_query_bbox, attn_mask, dn_meta\n\n\ndef dn_post_process(outputs_class, outputs_coord, dn_meta, aux_loss, _set_aux_loss):\n    \"\"\"\n        post process of dn after output from the transformer\n        put the dn part in the dn_meta\n    \"\"\"\n    if dn_meta and dn_meta['pad_size'] > 0:\n        output_known_class = outputs_class[:, :, :dn_meta['pad_size'], :]\n        output_known_coord = outputs_coord[:, :, :dn_meta['pad_size'], :]\n        
outputs_class = outputs_class[:, :, dn_meta['pad_size']:, :]\n        outputs_coord = outputs_coord[:, :, dn_meta['pad_size']:, :]\n        out = {'pred_logits': output_known_class[-1], 'pred_boxes': output_known_coord[-1]}\n        if aux_loss:\n            out['aux_outputs'] = _set_aux_loss(output_known_class, output_known_coord)\n        dn_meta['output_known_lbs_bboxes'] = out\n    return outputs_class, outputs_coord\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/focal.py",
    "content": "# --------------------------------------------------------\n# FocalNet for Semantic Segmentation\n# Copyright (c) 2022 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Jianwei Yang\n# --------------------------------------------------------\nimport math\nimport time\nimport numpy as np\nimport json\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\nfrom .util.misc import NestedTensor\n\nclass Mlp(nn.Module):\n    \"\"\" Multilayer perceptron.\"\"\"\n\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\nclass FocalModulation(nn.Module):\n    \"\"\" Focal Modulation\n\n    Args:\n        dim (int): Number of input channels.\n        proj_drop (float, optional): Dropout ratio of output. Default: 0.0\n        focal_level (int): Number of focal levels\n        focal_window (int): Focal window size at focal level 1\n        focal_factor (int, default=2): Step to increase the focal window\n        use_postln (bool, default=False): Whether use post-modulation layernorm\n    \"\"\"\n\n    def __init__(self, dim, proj_drop=0., focal_level=2, focal_window=7, focal_factor=2, use_postln=False, \n        use_postln_in_modulation=False, normalize_modulator=False):\n\n        super().__init__()\n        self.dim = dim\n\n        # specific args for focalv3\n        self.focal_level = focal_level\n        self.focal_window = focal_window\n        self.focal_factor = focal_factor\n        self.use_postln_in_modulation = use_postln_in_modulation\n        self.normalize_modulator = normalize_modulator\n\n        self.f = nn.Linear(dim, 2*dim+(self.focal_level+1), bias=True)\n        self.h = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0, groups=1, bias=True)\n\n        self.act = nn.GELU()\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n        self.focal_layers = nn.ModuleList()\n\n        if self.use_postln_in_modulation:\n            self.ln = nn.LayerNorm(dim)\n\n        for k in range(self.focal_level):\n            kernel_size = self.focal_factor*k + self.focal_window\n            self.focal_layers.append(\n                nn.Sequential(\n                    nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim, \n                        padding=kernel_size//2, bias=False),\n                    nn.GELU(),\n                    )\n                )\n\n    def forward(self, x):\n        \"\"\" Forward function.\n\n        Args:\n            x: input features with shape of (B, H, W, C)\n        \"\"\"\n        B, nH, nW, C = x.shape\n        x = self.f(x)\n        x = x.permute(0, 3, 1, 2).contiguous()\n        q, ctx, gates = torch.split(x, (C, C, self.focal_level+1), 1)\n        \n        ctx_all = 0\n        for l in range(self.focal_level):                     \n            ctx = self.focal_layers[l](ctx)\n       
     ctx_all = ctx_all + ctx*gates[:, l:l+1]\n        ctx_global = self.act(ctx.mean(2, keepdim=True).mean(3, keepdim=True))\n        ctx_all = ctx_all + ctx_global*gates[:,self.focal_level:]\n        if self.normalize_modulator:\n            ctx_all = ctx_all / (self.focal_level+1)\n\n        x_out = q * self.h(ctx_all)\n        x_out = x_out.permute(0, 2, 3, 1).contiguous()\n        if self.use_postln_in_modulation:\n            x_out = self.ln(x_out)            \n        x_out = self.proj(x_out)\n        x_out = self.proj_drop(x_out)\n        return x_out\n\nclass FocalModulationBlock(nn.Module):\n    \"\"\" Focal Modulation Block.\n\n    Args:\n        dim (int): Number of input channels.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n        drop (float, optional): Dropout rate. Default: 0.0\n        drop_path (float, optional): Stochastic depth rate. Default: 0.0\n        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm\n        focal_level (int): number of focal levels\n        focal_window (int): focal kernel size at level 1\n    \"\"\"\n\n    def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0., \n                 act_layer=nn.GELU, norm_layer=nn.LayerNorm,\n                 focal_level=2, focal_window=9, \n                 use_postln=False, use_postln_in_modulation=False, \n                 normalize_modulator=False, \n                 use_layerscale=False, \n                 layerscale_value=1e-4):\n        super().__init__()\n        self.dim = dim\n        self.mlp_ratio = mlp_ratio\n        self.focal_window = focal_window\n        self.focal_level = focal_level\n        self.use_postln = use_postln\n        self.use_layerscale = use_layerscale\n\n        self.norm1 = norm_layer(dim)\n        self.modulation = FocalModulation(\n            dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop=drop, \n            use_postln_in_modulation=use_postln_in_modulation, \n            normalize_modulator=normalize_modulator, \n        )            \n\n        self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.H = None\n        self.W = None\n\n        self.gamma_1 = 1.0\n        self.gamma_2 = 1.0\n        if self.use_layerscale:\n            self.gamma_1 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)\n            self.gamma_2 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)\n\n    def forward(self, x):\n        \"\"\" Forward function.\n\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n        B, L, C = x.shape\n        H, W = self.H, self.W\n        assert L == H * W, \"input feature has wrong size\"\n\n        shortcut = x\n        if not self.use_postln:\n            x = self.norm1(x)\n        x = x.view(B, H, W, C)\n        \n        # FM\n        x = self.modulation(x).view(B, H * W, C)\n        if self.use_postln:\n            x = self.norm1(x)\n\n        # FFN\n        x = shortcut + self.drop_path(self.gamma_1 * x)\n\n        if self.use_postln:\n            x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))\n        else:\n            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))\n\n        return x\n\nclass BasicLayer(nn.Module):\n    \"\"\" A basic focal modulation layer for one stage.\n\n    Args:\n        dim (int): Number of feature channels\n        depth (int): Depths of this stage.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        drop (float, optional): Dropout rate. Default: 0.0\n        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n        focal_level (int): Number of focal levels\n        focal_window (int): Focal window size at focal level 1\n        use_conv_embed (bool): Use overlapped convolution for patch embedding or now. Default: False\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False\n    \"\"\"\n\n    def __init__(self,\n                 dim,\n                 depth,\n                 mlp_ratio=4.,\n                 drop=0.,\n                 drop_path=0.,\n                 norm_layer=nn.LayerNorm,\n                 downsample=None,\n                 focal_window=9, \n                 focal_level=2, \n                 use_conv_embed=False,     \n                 use_postln=False,          \n                 use_postln_in_modulation=False, \n                 normalize_modulator=False, \n                 use_layerscale=False,                   \n                 use_checkpoint=False\n        ):\n        super().__init__()\n        self.depth = depth\n        self.use_checkpoint = use_checkpoint\n\n        # build blocks\n        self.blocks = nn.ModuleList([\n            FocalModulationBlock(\n                dim=dim,\n                mlp_ratio=mlp_ratio,\n                drop=drop,\n                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n                focal_window=focal_window, \n                focal_level=focal_level, \n                use_postln=use_postln, \n                use_postln_in_modulation=use_postln_in_modulation, \n                normalize_modulator=normalize_modulator, \n                use_layerscale=use_layerscale, \n                norm_layer=norm_layer)\n            for i in range(depth)])\n\n        # patch merging layer\n        if downsample is not None:\n            self.downsample = downsample(\n                patch_size=2, \n                in_chans=dim, embed_dim=2*dim, \n                use_conv_embed=use_conv_embed, \n                norm_layer=norm_layer, \n                is_stem=False\n            )\n\n        else:\n            self.downsample = None\n\n    def forward(self, x, H, W):\n        \"\"\" Forward function.\n\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n\n        for blk in self.blocks:\n            blk.H, blk.W = H, W\n            if self.use_checkpoint:\n                x = checkpoint.checkpoint(blk, x)\n            else:\n                x = blk(x)\n        if self.downsample is not None:\n            x_reshaped = x.transpose(1, 2).view(x.shape[0], x.shape[-1], H, W)\n            x_down = self.downsample(x_reshaped)      \n            x_down = x_down.flatten(2).transpose(1, 2)            \n            Wh, Ww = (H + 1) // 2, (W + 1) // 2\n            return x, H, W, x_down, Wh, Ww\n        else:\n            return x, H, W, x, H, W\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\" Image to Patch Embedding\n\n    Args:\n        patch_size (int): Patch token size. Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. Default: 96.\n        norm_layer (nn.Module, optional): Normalization layer. Default: None\n        use_conv_embed (bool): Whether use overlapped convolution for patch embedding. Default: False\n        is_stem (bool): Is the stem block or not. 
\n    \"\"\"\n\n    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, use_conv_embed=False, is_stem=False):\n        super().__init__()\n        patch_size = to_2tuple(patch_size)\n        self.patch_size = patch_size\n\n        self.in_chans = in_chans\n        self.embed_dim = embed_dim\n\n        if use_conv_embed:\n            # if we choose to use conv embedding, then we treat the stem and non-stem differently\n            if is_stem:\n                kernel_size = 7; padding = 2; stride = 4\n            else:\n                kernel_size = 3; padding = 1; stride = 2\n            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)                    \n        else:\n            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n\n        if norm_layer is not None:\n            self.norm = norm_layer(embed_dim)\n        else:\n            self.norm = None\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        _, _, H, W = x.size()\n        if W % self.patch_size[1] != 0:\n            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))\n        if H % self.patch_size[0] != 0:\n            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))\n\n        x = self.proj(x)  # B C Wh Ww\n        if self.norm is not None:\n            Wh, Ww = x.size(2), x.size(3)\n            x = x.flatten(2).transpose(1, 2)\n            x = self.norm(x)\n            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)\n\n        return x\n\n\nclass FocalNet(nn.Module):\n    \"\"\" FocalNet backbone.\n\n    Args:\n        pretrain_img_size (int): Input image size for training the pretrained model,\n            used in absolute postion embedding. Default 224.\n        patch_size (int | tuple(int)): Patch size. Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. Default: 96.\n        depths (tuple[int]): Depths of each Swin Transformer stage.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        drop_rate (float): Dropout rate.\n        drop_path_rate (float): Stochastic depth rate. Default: 0.2.\n        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n        patch_norm (bool): If True, add normalization after patch embedding. Default: True.\n        out_indices (Sequence[int]): Output from which stages.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        focal_levels (Sequence[int]): Number of focal levels at four stages\n        focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages\n        use_conv_embed (bool): Whether use overlapped convolution for patch embedding\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=1600,\n                 patch_size=4,\n                 in_chans=3,\n                 embed_dim=96,\n                 depths=[2, 2, 6, 2],\n                 mlp_ratio=4.,\n                 drop_rate=0.,\n                 drop_path_rate=0.3, # 0.3 or 0.4 works better for large+ models\n                 norm_layer=nn.LayerNorm,\n                 patch_norm=True,\n                 out_indices=(0, 1, 2, 3),\n                 frozen_stages=-1,\n                 focal_levels=[3,3,3,3], \n                 focal_windows=[3,3,3,3],\n                 use_conv_embed=False, \n                 use_postln=False, \n                 use_postln_in_modulation=False, \n                 use_layerscale=False, \n                 normalize_modulator=False, \n                 use_checkpoint=False,                  \n        ):\n        super().__init__()\n\n        self.pretrain_img_size = pretrain_img_size\n        self.num_layers = len(depths)\n        self.embed_dim = embed_dim\n        self.patch_norm = patch_norm\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n\n        # split image into non-overlapping patches\n        self.patch_embed = PatchEmbed(\n            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n            norm_layer=norm_layer if self.patch_norm else None, \n            use_conv_embed=use_conv_embed, is_stem=True)\n\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        # stochastic depth\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule\n\n        # build layers\n        self.layers = nn.ModuleList()\n        for i_layer in range(self.num_layers):\n            layer = BasicLayer(\n                dim=int(embed_dim * 2 ** i_layer),\n                depth=depths[i_layer],\n                mlp_ratio=mlp_ratio,\n                drop=drop_rate,\n                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n                norm_layer=norm_layer,\n                downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,\n                focal_window=focal_windows[i_layer], \n                focal_level=focal_levels[i_layer], \n                use_conv_embed=use_conv_embed,\n                use_postln=use_postln, \n                use_postln_in_modulation=use_postln_in_modulation, \n                normalize_modulator=normalize_modulator, \n                use_layerscale=use_layerscale, \n                use_checkpoint=use_checkpoint)\n            self.layers.append(layer)\n\n        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]\n        self.num_features = num_features\n\n        # add a norm layer for each output\n        for i_layer in out_indices:\n            layer = norm_layer(num_features[i_layer])\n            layer_name = f'norm{i_layer}'\n            self.add_module(layer_name, layer)\n\n        self._freeze_stages()\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            self.patch_embed.eval()\n            for param in self.patch_embed.parameters():\n                param.requires_grad = False\n\n        if self.frozen_stages >= 2:\n            self.pos_drop.eval()\n            for i in range(0, self.frozen_stages - 1):\n                m = self.layers[i]\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def 
init_weights(self, pretrained=None):\n        \"\"\"Initialize the weights in backbone.\n\n        Args:\n            pretrained (str, optional): Path to pre-trained weights.\n                Defaults to None.\n        \"\"\"\n\n        def _init_weights(m):\n            if isinstance(m, nn.Linear):\n                trunc_normal_(m.weight, std=.02)\n                if isinstance(m, nn.Linear) and m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.LayerNorm):\n                nn.init.constant_(m.bias, 0)\n                nn.init.constant_(m.weight, 1.0)\n\n        if isinstance(pretrained, str):\n            self.apply(_init_weights)\n            logger = get_root_logger()\n            load_checkpoint(self, pretrained, strict=False, logger=logger)\n        elif pretrained is None:\n            self.apply(_init_weights)\n        else:\n            raise TypeError('pretrained must be a str or None')\n\n    def forward(self, tensor_list: NestedTensor):\n        \"\"\"Forward function.\"\"\"\n        x = tensor_list.tensors\n        tic = time.time()\n        x = self.patch_embed(x)\n        Wh, Ww = x.size(2), x.size(3)\n\n        x = x.flatten(2).transpose(1, 2)\n        x = self.pos_drop(x)\n\n        outs = []\n        for i in range(self.num_layers):\n            layer = self.layers[i]\n            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)            \n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                x_out = norm_layer(x_out)\n                \n                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()\n                outs.append(out)\n\n        toc = time.time()\n\n        # collect for nesttensors        \n        outs_dict = {}\n        for idx, out_i in enumerate(outs):\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]\n            outs_dict[idx] = NestedTensor(out_i, mask)\n\n        return outs_dict\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n        super(FocalNet, self).train(mode)\n        self._freeze_stages()\n\n\n\ndef build_focalnet(modelname, **kw):\n    assert modelname in [\n        'focalnet_L_384_22k', \n        'focalnet_L_384_22k_fl4', \n        'focalnet_XL_384_22k', \n        'focalnet_XL_384_22k_fl4', \n        'focalnet_H_224_22k', \n        'focalnet_H_224_22k_fl4',         \n        ]\n\n    if 'focal_levels' in kw:\n        kw['focal_levels'] = [kw['focal_levels']] * 4\n\n    if 'focal_windows' in kw:\n        kw['focal_windows'] = [kw['focal_windows']] * 4\n\n    model_para_dict = {\n        'focalnet_L_384_22k': dict(\n            embed_dim=192,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]), \n            focal_windows=kw.get('focal_windows', [5, 5, 5, 5]), \n            use_conv_embed=True, \n            use_postln=True, \n            use_postln_in_modulation=False, \n            use_layerscale=True, \n            normalize_modulator=False, \n        ),\n        'focalnet_L_384_22k_fl4': dict(\n            embed_dim=192,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [4, 4, 4, 4]), \n            focal_windows=kw.get('focal_windows', [3, 3, 3, 3]), \n            use_conv_embed=True, \n            use_postln=True, \n            
use_postln_in_modulation=False, \n            use_layerscale=True, \n            normalize_modulator=True, \n        ),\n        'focalnet_XL_384_22k': dict(\n            embed_dim=256,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]), \n            focal_windows=kw.get('focal_windows', [5, 5, 5, 5]), \n            use_conv_embed=True, \n            use_postln=True, \n            use_postln_in_modulation=False, \n            use_layerscale=True, \n            normalize_modulator=False, \n        ),   \n        'focalnet_XL_384_22k_fl4': dict(\n            embed_dim=256,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [4, 4, 4, 4]), \n            focal_windows=kw.get('focal_windows', [3, 3, 3, 3]), \n            use_conv_embed=True, \n            use_postln=True, \n            use_postln_in_modulation=False, \n            use_layerscale=True, \n            normalize_modulator=True, \n        ),           \n        'focalnet_H_224_22k': dict(\n            embed_dim=352,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [3, 3, 3, 3]), \n            focal_windows=kw.get('focal_windows', [3, 3, 3, 3]), \n            use_conv_embed=True, \n            use_postln=True, \n            use_layerscale=True, \n            use_postln_in_modulation=True, \n            normalize_modulator=False, \n        ),   \n        'focalnet_H_224_22k_fl4': dict(\n            embed_dim=352,\n            depths=[ 2, 2, 18, 2 ],\n            focal_levels=kw.get('focal_levels', [4, 4, 4, 4]), \n            focal_windows=kw.get('focal_windows', [3, 3, 3, 3]), \n            use_conv_embed=True, \n            use_postln=True, \n            use_postln_in_modulation=True, \n            use_layerscale=True, \n            normalize_modulator=False, \n        ),                        \n    }\n\n    kw_cgf = model_para_dict[modelname]\n    kw_cgf.update(kw)\n    model = FocalNet(**kw_cgf)\n    return model"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/matcher.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modules to compute the matching cost and solve the corresponding LSAP.\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n# Modified from Deformable DETR (https://github.com/fundamentalvision/Deformable-DETR)\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# ------------------------------------------------------------------------\n\n\nimport torch, os\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom .util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n    \"\"\"This class computes an assignment between the targets and the predictions of the network\n    For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n    while the others are un-matched (and thus treated as non-objects).\n    \"\"\"\n\n    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha = 0.25):\n        \"\"\"Creates the matcher\n        Params:\n            cost_class: This is the relative weight of the classification error in the matching cost\n            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n        \"\"\"\n        super().__init__()\n        self.cost_class = cost_class\n        self.cost_bbox = cost_bbox\n        self.cost_giou = cost_giou\n        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n        self.focal_alpha = focal_alpha\n\n    @torch.no_grad()\n    def forward(self, outputs, targets):\n        \"\"\" Performs the matching\n        Params:\n            outputs: This is a dict that contains at least these entries:\n                 \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n                 \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n                 \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n                           objects in the target) containing the class labels\n                 \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n        Returns:\n            A list of size batch_size, containing tuples of (index_i, index_j) where:\n                - index_i is the indices of the selected predictions (in order)\n                - index_j is the indices of the corresponding selected targets (in order)\n            For each batch element, it 
holds:\n                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n        \"\"\"\n\n        bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n        # We flatten to compute the cost matrices in a batch\n        out_prob = outputs[\"pred_logits\"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]\n        out_bbox = outputs[\"pred_boxes\"].flatten(0, 1)  # [batch_size * num_queries, 4]\n\n        # Also concat the target labels and boxes\n        tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n        tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n        # Compute the classification cost.\n        alpha = self.focal_alpha\n        gamma = 2.0\n        neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n        pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n        cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n        # Compute the L1 cost between boxes\n        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n            \n        # Compute the giou cost betwen boxes            \n        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n        # Final cost matrix\n        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n        C = C.view(bs, num_queries, -1).cpu()\n\n        sizes = [len(v[\"boxes\"]) for v in targets]\n        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\nclass SimpleMinsumMatcher(nn.Module):\n    \"\"\"This class computes an assignment between the targets and the predictions of the network\n    For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n    there are more predictions than targets. 
In this case, we do a 1-to-1 matching of the best predictions,\n    while the others are un-matched (and thus treated as non-objects).\n    \"\"\"\n\n    def __init__(self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, focal_alpha = 0.25):\n        \"\"\"Creates the matcher\n        Params:\n            cost_class: This is the relative weight of the classification error in the matching cost\n            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n        \"\"\"\n        super().__init__()\n        self.cost_class = cost_class\n        self.cost_bbox = cost_bbox\n        self.cost_giou = cost_giou\n        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n\n        self.focal_alpha = focal_alpha\n\n    @torch.no_grad()\n    def forward(self, outputs, targets):\n        \"\"\" Performs the matching\n        Params:\n            outputs: This is a dict that contains at least these entries:\n                 \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n                 \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n                 \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n                           objects in the target) containing the class labels\n                 \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n        Returns:\n            A list of size batch_size, containing tuples of (index_i, index_j) where:\n                - index_i is the indices of the selected predictions (in order)\n                - index_j is the indices of the corresponding selected targets (in order)\n            For each batch element, it holds:\n                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n        \"\"\"\n\n        bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n        # We flatten to compute the cost matrices in a batch\n        out_prob = outputs[\"pred_logits\"].flatten(0, 1).sigmoid()  # [batch_size * num_queries, num_classes]\n        out_bbox = outputs[\"pred_boxes\"].flatten(0, 1)  # [batch_size * num_queries, 4]\n\n        # Also concat the target labels and boxes\n        tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n        tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n        # Compute the classification cost.\n        alpha = self.focal_alpha\n        gamma = 2.0\n        neg_cost_class = (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n        pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n        cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n        # Compute the L1 cost between boxes\n        cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n            \n        # Compute the giou cost betwen boxes            \n        cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n        # Final cost matrix\n        C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n        C = C.view(bs, num_queries, -1)\n\n        sizes = 
[len(v[\"boxes\"]) for v in targets]\n        indices = []\n        device = C.device\n        for i, (c, _size) in enumerate(zip(C.split(sizes, -1), sizes)):\n            weight_mat = c[i]\n            idx_i = weight_mat.min(0)[1]\n            idx_j = torch.arange(_size).to(device)\n            indices.append((idx_i, idx_j))\n\n        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]\n\n\ndef build_matcher(args):\n    assert args.matcher_type in ['HungarianMatcher', 'SimpleMinsumMatcher'], \"Unknown args.matcher_type: {}\".format(args.matcher_type)\n    if args.matcher_type == 'HungarianMatcher':\n        return HungarianMatcher(\n            cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n            focal_alpha=args.focal_alpha\n        )\n    elif args.matcher_type == 'SimpleMinsumMatcher':\n        return SimpleMinsumMatcher(\n            cost_class=args.set_cost_class, cost_bbox=args.set_cost_bbox, cost_giou=args.set_cost_giou,\n            focal_alpha=args.focal_alpha\n        )    \n    else:\n        raise NotImplementedError(\"Unknown args.matcher_type: {}\".format(args.matcher_type))"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/position_encoding.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Copied from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n\n\"\"\"\nVarious positional encodings for the transformer.\n\"\"\"\nimport math\nimport torch\nfrom torch import nn\n\nfrom .util.misc import NestedTensor\n\n\nclass PositionEmbeddingSine(nn.Module):\n    \"\"\"\n    This is a more standard version of the position embedding, very similar to the one\n    used by the Attention is all you need paper, generalized to work on images.\n    \"\"\"\n    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):\n        super().__init__()\n        self.num_pos_feats = num_pos_feats\n        self.temperature = temperature\n        self.normalize = normalize\n        if scale is not None and normalize is False:\n            raise ValueError(\"normalize should be True if scale is passed\")\n        if scale is None:\n            scale = 2 * math.pi\n        self.scale = scale\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        mask = tensor_list.mask\n        assert mask is not None\n        not_mask = ~mask\n        y_embed = not_mask.cumsum(1, dtype=torch.float32)\n        x_embed = not_mask.cumsum(2, dtype=torch.float32)\n        if self.normalize:\n            eps = 1e-6\n            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n        pos_x = x_embed[:, :, :, None] / dim_t\n        pos_y = y_embed[:, :, :, None] / dim_t\n        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n        return pos\n\nclass PositionEmbeddingSineHW(nn.Module):\n    \"\"\"\n    This is a more standard version of the position embedding, very similar to the one\n    used by the Attention is all you need paper, generalized to work on images.\n    \"\"\"\n    def __init__(self, num_pos_feats=64, temperatureH=10000, temperatureW=10000, normalize=False, scale=None):\n        super().__init__()\n        self.num_pos_feats = num_pos_feats\n        self.temperatureH = temperatureH\n        self.temperatureW = temperatureW\n        self.normalize = normalize\n        if scale is not None and normalize is False:\n            raise ValueError(\"normalize should be True if scale is passed\")\n        if scale is None:\n            scale = 2 * math.pi\n        self.scale = scale\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        mask = tensor_list.mask\n        assert mask 
is not None\n        not_mask = ~mask\n        y_embed = not_mask.cumsum(1, dtype=torch.float32)\n        x_embed = not_mask.cumsum(2, dtype=torch.float32)\n\n        # import ipdb; ipdb.set_trace()\n\n        if self.normalize:\n            eps = 1e-6\n            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale\n            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale\n\n        dim_tx = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n        dim_tx = self.temperatureW ** (2 * (dim_tx // 2) / self.num_pos_feats)\n        pos_x = x_embed[:, :, :, None] / dim_tx\n\n        dim_ty = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n        dim_ty = self.temperatureH ** (2 * (dim_ty // 2) / self.num_pos_feats)\n        pos_y = y_embed[:, :, :, None] / dim_ty\n\n        pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)\n        pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n\n        # import ipdb; ipdb.set_trace()\n\n        return pos\n\nclass PositionEmbeddingLearned(nn.Module):\n    \"\"\"\n    Absolute pos embedding, learned.\n    \"\"\"\n    def __init__(self, num_pos_feats=256):\n        super().__init__()\n        self.row_embed = nn.Embedding(50, num_pos_feats)\n        self.col_embed = nn.Embedding(50, num_pos_feats)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        nn.init.uniform_(self.row_embed.weight)\n        nn.init.uniform_(self.col_embed.weight)\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        h, w = x.shape[-2:]\n        i = torch.arange(w, device=x.device)\n        j = torch.arange(h, device=x.device)\n        x_emb = self.col_embed(i)\n        y_emb = self.row_embed(j)\n        pos = torch.cat([\n            x_emb.unsqueeze(0).repeat(h, 1, 1),\n            y_emb.unsqueeze(1).repeat(1, w, 1),\n        ], dim=-1).permute(2, 0, 1).unsqueeze(0).repeat(x.shape[0], 1, 1, 1)\n        return pos\n\n\ndef build_position_encoding(args):\n    N_steps = args.hidden_dim // 2\n    if args.position_embedding in ('v2', 'sine'):\n        # TODO find a better way of exposing other arguments\n        position_embedding = PositionEmbeddingSineHW(\n            N_steps, \n            temperatureH=args.pe_temperatureH,\n            temperatureW=args.pe_temperatureW,\n            normalize=True\n        )\n    elif args.position_embedding in ('v3', 'learned'):\n        position_embedding = PositionEmbeddingLearned(N_steps)\n    else:\n        raise ValueError(f\"not supported {args.position_embedding}\")\n\n    return position_embedding\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/segmentation.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Conditional DETR\n# Copyright (c) 2021 Microsoft. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Copied from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.\n# ------------------------------------------------------------------------\n\n\"\"\"\nThis file provides the definition of the convolutional heads used to predict masks, as well as the losses\n\"\"\"\nimport io\nfrom collections import defaultdict\nfrom typing import List, Optional\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom PIL import Image\n\nfrom .util import box_ops\nfrom .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list\n\ntry:\n    from panopticapi.utils import id2rgb, rgb2id\nexcept ImportError:\n    pass\n\n\nclass DETRsegm(nn.Module):\n    def __init__(self, detr, freeze_detr=False):\n        super().__init__()\n        self.detr = detr\n\n        if freeze_detr:\n            for p in self.parameters():\n                p.requires_grad_(False)\n\n        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0.0)\n        self.mask_head = MaskHeadSmallConv(hidden_dim + nheads, [1024, 512, 256], hidden_dim)\n\n    def forward(self, samples: NestedTensor):\n        if isinstance(samples, (list, torch.Tensor)):\n            samples = nested_tensor_from_tensor_list(samples)\n        features, pos = self.detr.backbone(samples)\n\n        bs = features[-1].tensors.shape[0]\n\n        src, mask = features[-1].decompose()\n        assert mask is not None\n        src_proj = self.detr.input_proj(src)\n        hs, memory = self.detr.transformer(src_proj, mask, self.detr.query_embed.weight, pos[-1])\n\n        outputs_class = self.detr.class_embed(hs)\n        outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n        out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n        if self.detr.aux_loss:\n            out['aux_outputs'] = self.detr._set_aux_loss(outputs_class, outputs_coord)\n\n        # FIXME h_boxes takes the last one computed, keep this in mind\n        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n        seg_masks = self.mask_head(src_proj, bbox_mask, [features[2].tensors, features[1].tensors, features[0].tensors])\n        outputs_seg_masks = seg_masks.view(bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1])\n\n        out[\"pred_masks\"] = outputs_seg_masks\n        return out\n\n\ndef _expand(tensor, length: int):\n    return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)\n\n\nclass MaskHeadSmallConv(nn.Module):\n    \"\"\"\n    Simple convolutional head, using group norm.\n    Upsampling is done using a FPN approach\n    \"\"\"\n\n    def __init__(self, dim, fpn_dims, context_dim):\n        super().__init__()\n\n        inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]\n        self.lay1 = 
torch.nn.Conv2d(dim, dim, 3, padding=1)\n        self.gn1 = torch.nn.GroupNorm(8, dim)\n        self.lay2 = torch.nn.Conv2d(dim, inter_dims[1], 3, padding=1)\n        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])\n        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)\n        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])\n        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)\n        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])\n        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)\n        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])\n        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)\n\n        self.dim = dim\n\n        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)\n        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)\n        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_uniform_(m.weight, a=1)\n                nn.init.constant_(m.bias, 0)\n\n    def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]):\n        x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)\n\n        x = self.lay1(x)\n        x = self.gn1(x)\n        x = F.relu(x)\n        x = self.lay2(x)\n        x = self.gn2(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter1(fpns[0])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay3(x)\n        x = self.gn3(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter2(fpns[1])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay4(x)\n        x = self.gn4(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter3(fpns[2])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay5(x)\n        x = self.gn5(x)\n        x = F.relu(x)\n\n        x = self.out_lay(x)\n        return x\n\n\nclass MHAttentionMap(nn.Module):\n    \"\"\"This is a 2D attention module, which only returns the attention softmax (no multiplication by value)\"\"\"\n\n    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True):\n        super().__init__()\n        self.num_heads = num_heads\n        self.hidden_dim = hidden_dim\n        self.dropout = nn.Dropout(dropout)\n\n        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)\n        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)\n\n        nn.init.zeros_(self.k_linear.bias)\n        nn.init.zeros_(self.q_linear.bias)\n        nn.init.xavier_uniform_(self.k_linear.weight)\n        nn.init.xavier_uniform_(self.q_linear.weight)\n        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5\n\n    def forward(self, q, k, mask: Optional[Tensor] = None):\n        q = self.q_linear(q)\n        k = F.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias)\n        qh = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads)\n        kh = k.view(k.shape[0], 
self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1])\n        weights = torch.einsum(\"bqnc,bnchw->bqnhw\", qh * self.normalize_fact, kh)\n\n        if mask is not None:\n            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float(\"-inf\"))\n        weights = F.softmax(weights.flatten(2), dim=-1).view(weights.size())\n        weights = self.dropout(weights)\n        return weights\n\n\ndef dice_loss(inputs, targets, num_boxes):\n    \"\"\"\n    Compute the DICE loss, similar to generalized IOU for masks\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n    \"\"\"\n    inputs = inputs.sigmoid()\n    inputs = inputs.flatten(1)\n    numerator = 2 * (inputs * targets).sum(1)\n    denominator = inputs.sum(-1) + targets.sum(-1)\n    loss = 1 - (numerator + 1) / (denominator + 1)\n    return loss.sum() / num_boxes\n\n\ndef sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n    \"\"\"\n    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n        alpha: (optional) Weighting factor in range (0,1) to balance\n                positive vs negative examples. 
Default = 0.25.\n        gamma: Exponent of the modulating factor (1 - p_t) to\n               balance easy vs hard examples.\n    Returns:\n        Loss tensor\n    \"\"\"\n    prob = inputs.sigmoid()\n    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n    p_t = prob * targets + (1 - prob) * (1 - targets)\n    loss = ce_loss * ((1 - p_t) ** gamma)\n\n    if alpha >= 0:\n        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n        loss = alpha_t * loss\n\n    return loss.mean(1).sum() / num_boxes\n\n\nclass PostProcessSegm(nn.Module):\n    def __init__(self, threshold=0.5):\n        super().__init__()\n        self.threshold = threshold\n\n    @torch.no_grad()\n    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n        assert len(orig_target_sizes) == len(max_target_sizes)\n        max_h, max_w = max_target_sizes.max(0)[0].tolist()\n        outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n        outputs_masks = F.interpolate(outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False)\n        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n        for i, (cur_mask, t, tt) in enumerate(zip(outputs_masks, max_target_sizes, orig_target_sizes)):\n            img_h, img_w = t[0], t[1]\n            results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n            results[i][\"masks\"] = F.interpolate(\n                results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n            ).byte()\n\n        return results\n\n\nclass PostProcessPanoptic(nn.Module):\n    \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n    coco panoptic API \"\"\"\n\n    def __init__(self, is_thing_map, threshold=0.85):\n        \"\"\"\n        Parameters:\n           is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether\n                          the class is a thing (True) or a stuff (False) class\n           threshold: confidence threshold: segments with confidence lower than this will be deleted\n        \"\"\"\n        super().__init__()\n        self.threshold = threshold\n        self.is_thing_map = is_thing_map\n\n    def forward(self, outputs, processed_sizes, target_sizes=None):\n        \"\"\" This function computes the panoptic prediction from the model's predictions.\n        Parameters:\n            outputs: This is a dict coming directly from the model. See the model doc for the content.\n            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n                             model, ie the size after data augmentation but before batching.\n            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n                          of each prediction. 
If left to None, it will default to the processed_sizes\n            \"\"\"\n        if target_sizes is None:\n            target_sizes = processed_sizes\n        assert len(processed_sizes) == len(target_sizes)\n        out_logits, raw_masks, raw_boxes = outputs[\"pred_logits\"], outputs[\"pred_masks\"], outputs[\"pred_boxes\"]\n        assert len(out_logits) == len(raw_masks) == len(target_sizes)\n        preds = []\n\n        def to_tuple(tup):\n            if isinstance(tup, tuple):\n                return tup\n            return tuple(tup.cpu().tolist())\n\n        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n        ):\n            # we filter empty queries and detection below threshold\n            scores, labels = cur_logits.softmax(-1).max(-1)\n            keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (scores > self.threshold)\n            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n            cur_scores = cur_scores[keep]\n            cur_classes = cur_classes[keep]\n            cur_masks = cur_masks[keep]\n            cur_masks = interpolate(cur_masks[:, None], to_tuple(size), mode=\"bilinear\").squeeze(1)\n            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n            h, w = cur_masks.shape[-2:]\n            assert len(cur_boxes) == len(cur_classes)\n\n            # It may be that we have several predicted masks for the same stuff class.\n            # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n            cur_masks = cur_masks.flatten(1)\n            stuff_equiv_classes = defaultdict(lambda: [])\n            for k, label in enumerate(cur_classes):\n                if not self.is_thing_map[label.item()]:\n                    stuff_equiv_classes[label.item()].append(k)\n\n            def get_ids_area(masks, scores, dedup=False):\n                # This helper function creates the final panoptic segmentation image\n                # It also returns the area of the masks that appears on the image\n\n                m_id = masks.transpose(0, 1).softmax(-1)\n\n                if m_id.shape[-1] == 0:\n                    # We didn't detect any mask :(\n                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n                else:\n                    m_id = m_id.argmax(-1).view(h, w)\n\n                if dedup:\n                    # Merge the masks corresponding to the same stuff class\n                    for equiv in stuff_equiv_classes.values():\n                        if len(equiv) > 1:\n                            for eq_id in equiv:\n                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n                final_h, final_w = to_tuple(target_size)\n\n                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n                seg_img = seg_img.resize(size=(final_w, final_h), resample=Image.NEAREST)\n\n                np_seg_img = (\n                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes())).view(final_h, final_w, 3).numpy()\n                )\n                m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n                area = []\n                for i in range(len(scores)):\n                    area.append(m_id.eq(i).sum().item())\n                return area, seg_img\n\n            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n            if cur_classes.numel() 
> 0:\n                # We now filter empty masks as long as we find some\n                while True:\n                    filtered_small = torch.as_tensor(\n                        [area[i] <= 4 for i, c in enumerate(cur_classes)], dtype=torch.bool, device=keep.device\n                    )\n                    if filtered_small.any().item():\n                        cur_scores = cur_scores[~filtered_small]\n                        cur_classes = cur_classes[~filtered_small]\n                        cur_masks = cur_masks[~filtered_small]\n                        area, seg_img = get_ids_area(cur_masks, cur_scores)\n                    else:\n                        break\n\n            else:\n                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n            segments_info = []\n            for i, a in enumerate(area):\n                cat = cur_classes[i].item()\n                segments_info.append({\"id\": i, \"isthing\": self.is_thing_map[cat], \"category_id\": cat, \"area\": a})\n            del cur_classes\n\n            with io.BytesIO() as out:\n                seg_img.save(out, format=\"PNG\")\n                predictions = {\"png_string\": out.getvalue(), \"segments_info\": segments_info}\n            preds.append(predictions)\n        return preds\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/swin_transformer.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# --------------------------------------------------------\n# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nimport numpy as np\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\nfrom .util.misc import NestedTensor\n\n\nclass Mlp(nn.Module):\n    \"\"\" Multilayer perceptron.\"\"\"\n\n    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\ndef window_partition(x, window_size):\n    \"\"\"\n    Args:\n        x: (B, H, W, C)\n        window_size (int): window size\n    Returns:\n        windows: (num_windows*B, window_size, window_size, C)\n    \"\"\"\n    B, H, W, C = x.shape\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n    \"\"\"\n    Args:\n        windows: (num_windows*B, window_size, window_size, C)\n        window_size (int): Window size\n        H (int): Height of image\n        W (int): Width of image\n    Returns:\n        x: (B, H, W, C)\n    \"\"\"\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\n    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n    return x\n\n\nclass WindowAttention(nn.Module):\n    \"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n    It supports both of shifted and non-shifted window.\n    Args:\n        dim (int): Number of input channels.\n        window_size (tuple[int]): The height and width of the window.\n        num_heads (int): Number of attention heads.\n        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n        proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n    \"\"\"\n\n    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):\n\n        super().__init__()\n        self.dim = dim\n        self.window_size = window_size  # Wh, Ww\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        # define a parameter table of relative position bias\n        self.relative_position_bias_table = nn.Parameter(\n            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads))  # 2*Wh-1 * 2*Ww-1, nH\n\n        # get pair-wise relative position index for each token inside the window\n        coords_h = torch.arange(self.window_size[0])\n        coords_w = torch.arange(self.window_size[1])\n        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww\n        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww\n        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww\n        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2\n        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0\n        relative_coords[:, :, 1] += self.window_size[1] - 1\n        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww\n        self.register_buffer(\"relative_position_index\", relative_position_index)\n\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        trunc_normal_(self.relative_position_bias_table, std=.02)\n        self.softmax = nn.Softmax(dim=-1)\n\n    def forward(self, x, mask=None):\n        \"\"\" Forward function.\n        Args:\n            x: input features with shape of (num_windows*B, N, C)\n            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n        \"\"\"\n        B_, N, C = x.shape\n        qkv = self.qkv(x).reshape(B_, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)\n        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)\n\n        q = q * self.scale\n        attn = (q @ k.transpose(-2, -1))\n\n        relative_position_bias = self.relative_position_bias_table[self.relative_position_index.view(-1)].view(\n            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1)  # Wh*Ww,Wh*Ww,nH\n        relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous()  # nH, Wh*Ww, Wh*Ww\n        attn = attn + relative_position_bias.unsqueeze(0)\n\n        if mask is not None:\n            nW = mask.shape[0]\n            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)\n            attn = attn.view(-1, self.num_heads, N, N)\n            attn = self.softmax(attn)\n        else:\n            attn = self.softmax(attn)\n\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n        x = self.proj(x)\n        x = self.proj_drop(x)\n        return x\n\n\nclass SwinTransformerBlock(nn.Module):\n    \"\"\" Swin Transformer Block.\n    Args:\n        dim (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (int): Window size.\n        shift_size (int): Shift size for SW-MSA.\n        
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n        drop (float, optional): Dropout rate. Default: 0.0\n        attn_drop (float, optional): Attention dropout rate. Default: 0.0\n        drop_path (float, optional): Stochastic depth rate. Default: 0.0\n        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm\n    \"\"\"\n\n    def __init__(self, dim, num_heads, window_size=7, shift_size=0,\n                 mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0., drop_path=0.,\n                 act_layer=nn.GELU, norm_layer=nn.LayerNorm):\n        super().__init__()\n        self.dim = dim\n        self.num_heads = num_heads\n        self.window_size = window_size\n        self.shift_size = shift_size\n        self.mlp_ratio = mlp_ratio\n        assert 0 <= self.shift_size < self.window_size, \"shift_size must in 0-window_size\"\n\n        self.norm1 = norm_layer(dim)\n        self.attn = WindowAttention(\n            dim, window_size=to_2tuple(self.window_size), num_heads=num_heads,\n            qkv_bias=qkv_bias, qk_scale=qk_scale, attn_drop=attn_drop, proj_drop=drop)\n\n        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)\n\n        self.H = None\n        self.W = None\n\n    def forward(self, x, mask_matrix):\n        \"\"\" Forward function.\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n            mask_matrix: Attention mask for cyclic shift.\n        \"\"\"\n        B, L, C = x.shape\n        H, W = self.H, self.W\n        assert L == H * W, \"input feature has wrong size\"\n\n        shortcut = x\n        x = self.norm1(x)\n        x = x.view(B, H, W, C)\n\n        # pad feature maps to multiples of window size\n        pad_l = pad_t = 0\n        pad_r = (self.window_size - W % self.window_size) % self.window_size\n        pad_b = (self.window_size - H % self.window_size) % self.window_size\n        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n        _, Hp, Wp, _ = x.shape\n\n        # cyclic shift\n        if self.shift_size > 0:\n            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))\n            attn_mask = mask_matrix\n        else:\n            shifted_x = x\n            attn_mask = None\n\n        # partition windows\n        x_windows = window_partition(shifted_x, self.window_size)  # nW*B, window_size, window_size, C\n        x_windows = x_windows.view(-1, self.window_size * self.window_size, C)  # nW*B, window_size*window_size, C\n\n        # W-MSA/SW-MSA\n        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C\n\n        # merge windows\n        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C\n\n        # reverse cyclic shift\n        if self.shift_size > 0:\n            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), 
dims=(1, 2))\n        else:\n            x = shifted_x\n\n        if pad_r > 0 or pad_b > 0:\n            x = x[:, :H, :W, :].contiguous()\n\n        x = x.view(B, H * W, C)\n\n        # FFN\n        x = shortcut + self.drop_path(x)\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n        return x\n\n\nclass PatchMerging(nn.Module):\n    \"\"\" Patch Merging Layer\n    Args:\n        dim (int): Number of input channels.\n        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm\n    \"\"\"\n    def __init__(self, dim, norm_layer=nn.LayerNorm):\n        super().__init__()\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def forward(self, x, H, W):\n        \"\"\" Forward function.\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n        B, L, C = x.shape\n        assert L == H * W, \"input feature has wrong size\"\n\n        x = x.view(B, H, W, C)\n\n        # padding\n        pad_input = (H % 2 == 1) or (W % 2 == 1)\n        if pad_input:\n            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))\n\n        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C\n        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C\n        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C\n        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C\n        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C\n        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C\n\n        x = self.norm(x)\n        x = self.reduction(x)\n\n        return x\n\n\nclass BasicLayer(nn.Module):\n    \"\"\" A basic Swin Transformer layer for one stage.\n    Args:\n        dim (int): Number of feature channels\n        depth (int): Depths of this stage.\n        num_heads (int): Number of attention head.\n        window_size (int): Local window size. Default: 7.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n        drop (float, optional): Dropout rate. Default: 0.0\n        attn_drop (float, optional): Attention dropout rate. Default: 0.0\n        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n    \"\"\"\n\n    def __init__(self,\n                 dim,\n                 depth,\n                 num_heads,\n                 window_size=7,\n                 mlp_ratio=4.,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop=0.,\n                 attn_drop=0.,\n                 drop_path=0.,\n                 norm_layer=nn.LayerNorm,\n                 downsample=None,\n                 use_checkpoint=False):\n        super().__init__()\n        self.window_size = window_size\n        self.shift_size = window_size // 2\n        self.depth = depth\n        self.use_checkpoint = use_checkpoint\n\n        # build blocks\n        self.blocks = nn.ModuleList([\n            SwinTransformerBlock(\n                dim=dim,\n                num_heads=num_heads,\n                window_size=window_size,\n                shift_size=0 if (i % 2 == 0) else window_size // 2,\n                mlp_ratio=mlp_ratio,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop=drop,\n                attn_drop=attn_drop,\n                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,\n                norm_layer=norm_layer)\n            for i in range(depth)])\n\n        # patch merging layer\n        if downsample is not None:\n            self.downsample = downsample(dim=dim, norm_layer=norm_layer)\n        else:\n            self.downsample = None\n\n    def forward(self, x, H, W):\n        \"\"\" Forward function.\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n\n        # calculate attention mask for SW-MSA\n        Hp = int(np.ceil(H / self.window_size)) * self.window_size\n        Wp = int(np.ceil(W / self.window_size)) * self.window_size\n        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1\n        h_slices = (slice(0, -self.window_size),\n                    slice(-self.window_size, -self.shift_size),\n                    slice(-self.shift_size, None))\n        w_slices = (slice(0, -self.window_size),\n                    slice(-self.window_size, -self.shift_size),\n                    slice(-self.shift_size, None))\n        cnt = 0\n        for h in h_slices:\n            for w in w_slices:\n                img_mask[:, h, w, :] = cnt\n                cnt += 1\n\n        mask_windows = window_partition(img_mask, self.window_size)  # nW, window_size, window_size, 1\n        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0))\n\n        for blk in self.blocks:\n            blk.H, blk.W = H, W\n            if self.use_checkpoint:\n                x = checkpoint.checkpoint(blk, x, attn_mask)\n            else:\n                x = blk(x, attn_mask)\n        if self.downsample is not None:\n            x_down = self.downsample(x, H, W)\n            Wh, Ww = (H + 1) // 2, (W + 1) // 2\n            return x, H, W, x_down, Wh, Ww\n        else:\n            return x, H, W, x, H, W\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\" Image to Patch Embedding\n    Args:\n        patch_size (int): Patch token size. Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. 
Default: 96.\n        norm_layer (nn.Module, optional): Normalization layer. Default: None\n    \"\"\"\n\n    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):\n        super().__init__()\n        patch_size = to_2tuple(patch_size)\n        self.patch_size = patch_size\n\n        self.in_chans = in_chans\n        self.embed_dim = embed_dim\n\n        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)\n        if norm_layer is not None:\n            self.norm = norm_layer(embed_dim)\n        else:\n            self.norm = None\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        # padding\n        _, _, H, W = x.size()\n        if W % self.patch_size[1] != 0:\n            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))\n        if H % self.patch_size[0] != 0:\n            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))\n\n        x = self.proj(x)  # B C Wh Ww\n        if self.norm is not None:\n            Wh, Ww = x.size(2), x.size(3)\n            x = x.flatten(2).transpose(1, 2)\n            x = self.norm(x)\n            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)\n\n        return x\n\n\nclass SwinTransformer(nn.Module):\n    \"\"\" Swin Transformer backbone.\n        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -\n          https://arxiv.org/pdf/2103.14030\n    Args:\n        pretrain_img_size (int): Input image size for training the pretrained model,\n            used in absolute postion embedding. Default 224.\n        patch_size (int | tuple(int)): Patch size. Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. Default: 96.\n        depths (tuple[int]): Depths of each Swin Transformer stage.\n        num_heads (tuple[int]): Number of attention head of each stage.\n        window_size (int): Window size. Default: 7.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.\n        drop_rate (float): Dropout rate.\n        attn_drop_rate (float): Attention dropout rate. Default: 0.\n        drop_path_rate (float): Stochastic depth rate. Default: 0.2.\n        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.\n        patch_norm (bool): If True, add normalization after patch embedding. Default: True.\n        out_indices (Sequence[int]): Output from which stages.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n        dilation (bool): if True, the output size if 16x downsample, ow 32x downsample.\n    \"\"\"\n\n    def __init__(self,\n                 pretrain_img_size=224,\n                 patch_size=4,\n                 in_chans=3,\n                 embed_dim=96,\n                 depths=[2, 2, 6, 2],\n                 num_heads=[3, 6, 12, 24],\n                 window_size=7,\n                 mlp_ratio=4.,\n                 qkv_bias=True,\n                 qk_scale=None,\n                 drop_rate=0.,\n                 attn_drop_rate=0.,\n                 drop_path_rate=0.2,\n                 norm_layer=nn.LayerNorm,\n                 ape=False,\n                 patch_norm=True,\n                 out_indices=(0, 1, 2, 3),\n                 frozen_stages=-1,\n                 dilation=False,\n                 use_checkpoint=False):\n        super().__init__()\n\n        self.pretrain_img_size = pretrain_img_size\n        self.num_layers = len(depths)\n        self.embed_dim = embed_dim\n        self.ape = ape\n        self.patch_norm = patch_norm\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n        self.dilation = dilation\n\n        if use_checkpoint:\n            print(\"use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!\")\n\n        # split image into non-overlapping patches\n        self.patch_embed = PatchEmbed(\n            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,\n            norm_layer=norm_layer if self.patch_norm else None)\n\n        # absolute position embedding\n        if self.ape:\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n            patch_size = to_2tuple(patch_size)\n            patches_resolution = [pretrain_img_size[0] // patch_size[0], pretrain_img_size[1] // patch_size[1]]\n\n            self.absolute_pos_embed = nn.Parameter(torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]))\n            trunc_normal_(self.absolute_pos_embed, std=.02)\n\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        # stochastic depth\n        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule\n\n        # build layers\n        self.layers = nn.ModuleList()\n        # prepare downsample list\n        downsamplelist = [PatchMerging for i in range(self.num_layers)]\n        downsamplelist[-1] = None\n        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]\n        if self.dilation:\n            downsamplelist[-2] = None\n            num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2\n        for i_layer in range(self.num_layers):\n            layer = BasicLayer(\n                # dim=int(embed_dim * 2 ** i_layer),\n                dim=num_features[i_layer],\n                depth=depths[i_layer],\n                num_heads=num_heads[i_layer],\n                window_size=window_size,\n                mlp_ratio=mlp_ratio,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop=drop_rate,\n                attn_drop=attn_drop_rate,\n                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],\n                norm_layer=norm_layer,\n                # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n                downsample=downsamplelist[i_layer],\n                use_checkpoint=use_checkpoint)\n            self.layers.append(layer)\n\n        # num_features = [int(embed_dim * 2 ** i) for i in 
range(self.num_layers)]\n        self.num_features = num_features\n\n        # add a norm layer for each output\n        for i_layer in out_indices:\n            layer = norm_layer(num_features[i_layer])\n            layer_name = f'norm{i_layer}'\n            self.add_module(layer_name, layer)\n\n        self._freeze_stages()\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            self.patch_embed.eval()\n            for param in self.patch_embed.parameters():\n                param.requires_grad = False\n\n        if self.frozen_stages >= 1 and self.ape:\n            self.absolute_pos_embed.requires_grad = False\n\n        if self.frozen_stages >= 2:\n            self.pos_drop.eval()\n            for i in range(0, self.frozen_stages - 1):\n                m = self.layers[i]\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    # def init_weights(self, pretrained=None):\n    #     \"\"\"Initialize the weights in backbone.\n    #     Args:\n    #         pretrained (str, optional): Path to pre-trained weights.\n    #             Defaults to None.\n    #     \"\"\"\n\n    #     def _init_weights(m):\n    #         if isinstance(m, nn.Linear):\n    #             trunc_normal_(m.weight, std=.02)\n    #             if isinstance(m, nn.Linear) and m.bias is not None:\n    #                 nn.init.constant_(m.bias, 0)\n    #         elif isinstance(m, nn.LayerNorm):\n    #             nn.init.constant_(m.bias, 0)\n    #             nn.init.constant_(m.weight, 1.0)\n\n    #     if isinstance(pretrained, str):\n    #         self.apply(_init_weights)\n    #         logger = get_root_logger()\n    #         load_checkpoint(self, pretrained, strict=False, logger=logger)\n    #     elif pretrained is None:\n    #         self.apply(_init_weights)\n    #     else:\n    #         raise TypeError('pretrained must be a str or None')\n\n\n    def forward_raw(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.patch_embed(x)\n\n        Wh, Ww = x.size(2), x.size(3)\n        if self.ape:\n            # interpolate the position embedding to the corresponding size\n            absolute_pos_embed = F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')\n            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C\n        else:\n            x = x.flatten(2).transpose(1, 2)\n        x = self.pos_drop(x)\n\n        outs = []\n        for i in range(self.num_layers):\n            layer = self.layers[i]\n            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)\n            # import ipdb; ipdb.set_trace()\n\n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                x_out = norm_layer(x_out)\n\n                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()\n                outs.append(out)\n        # in:\n        #   torch.Size([2, 3, 1024, 1024])\n        # outs:\n        #   [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \\\n        #       torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]\n        return tuple(outs)\n\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n\n        \"\"\"Forward function.\"\"\"\n        x = self.patch_embed(x)\n\n        Wh, Ww = x.size(2), x.size(3)\n        if self.ape:\n            # interpolate the position embedding to the corresponding size\n            absolute_pos_embed = 
F.interpolate(self.absolute_pos_embed, size=(Wh, Ww), mode='bicubic')\n            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C\n        else:\n            x = x.flatten(2).transpose(1, 2)\n        x = self.pos_drop(x)\n\n        outs = []\n        for i in range(self.num_layers):\n            layer = self.layers[i]\n            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)\n\n            if i in self.out_indices:\n                norm_layer = getattr(self, f'norm{i}')\n                x_out = norm_layer(x_out)\n\n                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()\n                outs.append(out)\n        # in:\n        #   torch.Size([2, 3, 1024, 1024])\n        # out:\n        #   [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \\\n        #       torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])]\n\n        # collect for nesttensors        \n        outs_dict = {}\n        for idx, out_i in enumerate(outs):\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0]\n            outs_dict[idx] = NestedTensor(out_i, mask)\n\n        return outs_dict\n\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n        super(SwinTransformer, self).train(mode)\n        self._freeze_stages()\n\n\n\ndef build_swin_transformer(modelname, pretrain_img_size, **kw):\n    assert modelname in ['swin_T_224_1k', 'swin_B_224_22k', 'swin_B_384_22k', 'swin_L_224_22k', 'swin_L_384_22k']\n\n    model_para_dict = {\n        'swin_T_224_1k': dict(\n            embed_dim=96,\n            depths=[ 2, 2, 6, 2 ],\n            num_heads=[ 3, 6, 12, 24],\n            window_size=7\n        ),        \n        'swin_B_224_22k': dict(\n            embed_dim=128,\n            depths=[ 2, 2, 18, 2 ],\n            num_heads=[ 4, 8, 16, 32 ],\n            window_size=7\n        ),\n        'swin_B_384_22k': dict(\n            embed_dim=128,\n            depths=[ 2, 2, 18, 2 ],\n            num_heads=[ 4, 8, 16, 32 ],\n            window_size=12\n        ),\n        'swin_L_224_22k': dict(\n            embed_dim=192,\n            depths=[ 2, 2, 18, 2 ],\n            num_heads=[ 6, 12, 24, 48 ],\n            window_size=7\n        ),\n        'swin_L_384_22k': dict(\n            embed_dim=192,\n            depths=[ 2, 2, 18, 2 ],\n            num_heads=[ 6, 12, 24, 48 ],\n            window_size=12\n        ),\n    }\n    kw_cgf = model_para_dict[modelname]\n    kw_cgf.update(kw)\n    model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cgf)\n    return model\n\nif __name__ == \"__main__\":\n    model = build_swin_transformer('swin_L_384_22k', 384, dilation=True)\n    x = torch.rand(2, 3, 1024, 1024)\n    y = model.forward_raw(x)\n    import ipdb; ipdb.set_trace()\n    x = torch.rand(2, 3, 384, 384)\n    y = model.forward_raw(x)"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/transformer_deformable.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\nimport copy\nimport os\nfrom typing import Optional, List\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nfrom torch.nn.init import xavier_uniform_, constant_, uniform_, normal_\n\nfrom .util.misc import inverse_sigmoid\nfrom projects.instance_segment_anything.ops.modules import MSDeformAttn\n\nfrom .utils import sigmoid_focal_loss, MLP, _get_activation_fn, gen_sineembed_for_position\n\nclass DeformableTransformer(nn.Module):\n    def __init__(self, d_model=256, nhead=8,\n                 num_encoder_layers=6, num_decoder_layers=6, dim_feedforward=1024, dropout=0.1,\n                 activation=\"relu\", return_intermediate_dec=False,\n                 num_feature_levels=4, dec_n_points=4,  enc_n_points=4,\n                 two_stage=False, two_stage_num_proposals=300,\n                 use_dab=False, high_dim_query_update=False, no_sine_embed=False):\n        super().__init__()\n\n        self.d_model = d_model\n        self.nhead = nhead\n        self.two_stage = two_stage\n        self.two_stage_num_proposals = two_stage_num_proposals\n        self.use_dab = use_dab\n\n        encoder_layer = DeformableTransformerEncoderLayer(d_model, dim_feedforward,\n                                                          dropout, activation,\n                                                          num_feature_levels, nhead, enc_n_points)\n        self.encoder = DeformableTransformerEncoder(encoder_layer, num_encoder_layers)\n\n        decoder_layer = DeformableTransformerDecoderLayer(d_model, dim_feedforward,\n                                                          dropout, activation,\n                                                          num_feature_levels, nhead, dec_n_points)\n        self.decoder = DeformableTransformerDecoder(decoder_layer, num_decoder_layers, return_intermediate_dec, \n                                                            use_dab=use_dab, d_model=d_model, high_dim_query_update=high_dim_query_update, no_sine_embed=no_sine_embed)\n\n        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))\n\n        if two_stage:\n            self.enc_output = nn.Linear(d_model, d_model)\n            self.enc_output_norm = nn.LayerNorm(d_model)\n            self.pos_trans = nn.Linear(d_model * 2, d_model * 2)\n            self.pos_trans_norm = nn.LayerNorm(d_model * 2)\n        else:\n            if not self.use_dab:\n                self.reference_points = nn.Linear(d_model, 2)\n\n        self.high_dim_query_update = high_dim_query_update\n        if high_dim_query_update:\n            assert not self.use_dab, \"use_dab must be True\"\n\n        self._reset_parameters()\n\n    def _reset_parameters(self):\n        for p in self.parameters():\n            if p.dim() > 1:\n            
    nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MSDeformAttn):\n                m._reset_parameters()\n        if not self.two_stage and not self.use_dab:\n            xavier_uniform_(self.reference_points.weight.data, gain=1.0)\n            constant_(self.reference_points.bias.data, 0.)\n        normal_(self.level_embed)\n\n    def get_proposal_pos_embed(self, proposals):\n        num_pos_feats = 128\n        temperature = 10000\n        scale = 2 * math.pi\n\n        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)\n        dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n        # N, L, 4\n        proposals = proposals.sigmoid() * scale\n        # N, L, 4, 128\n        pos = proposals[:, :, :, None] / dim_t\n        # N, L, 4, 64, 2\n        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)\n        return pos\n\n    def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n        N_, S_, C_ = memory.shape\n        base_scale = 4.0\n        proposals = []\n        _cur = 0\n        for lvl, (H_, W_) in enumerate(spatial_shapes):\n            mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n            grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n                                            torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n            scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n            grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n            wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n            proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n            proposals.append(proposal)\n            _cur += (H_ * W_)\n        output_proposals = torch.cat(proposals, 1)\n        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n        output_proposals = torch.log(output_proposals / (1 - output_proposals))\n        output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n        output_memory = memory\n        output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n        output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n        output_memory = self.enc_output_norm(self.enc_output(output_memory))\n        return output_memory, output_proposals\n\n    def get_valid_ratio(self, mask):\n        _, H, W = mask.shape\n        valid_H = torch.sum(~mask[:, :, 0], 1)\n        valid_W = torch.sum(~mask[:, 0, :], 1)\n        valid_ratio_h = valid_H.float() / H\n        valid_ratio_w = valid_W.float() / W\n        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n        return valid_ratio\n\n    def forward(self, srcs, masks, pos_embeds, query_embed=None):\n        \"\"\"\n        Input:\n            - srcs: List([bs, c, h, w])\n            - masks: List([bs, h, w])\n        \"\"\"\n        assert self.two_stage or query_embed is not None\n\n        # prepare input 
for encoder\n        src_flatten = []\n        mask_flatten = []\n        lvl_pos_embed_flatten = []\n        spatial_shapes = []\n        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):\n            bs, c, h, w = src.shape\n            spatial_shape = (h, w)\n            spatial_shapes.append(spatial_shape)\n\n            src = src.flatten(2).transpose(1, 2)                # bs, hw, c\n            mask = mask.flatten(1)                              # bs, hw\n            pos_embed = pos_embed.flatten(2).transpose(1, 2)    # bs, hw, c\n            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)\n            lvl_pos_embed_flatten.append(lvl_pos_embed)\n            src_flatten.append(src)\n            mask_flatten.append(mask)\n        src_flatten = torch.cat(src_flatten, 1)     # bs, \\sum{hxw}, c \n        mask_flatten = torch.cat(mask_flatten, 1)   # bs, \\sum{hxw}\n        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n        spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=src_flatten.device)\n        level_start_index = torch.cat((spatial_shapes.new_zeros((1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))\n        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)\n\n        # encoder\n        memory = self.encoder(src_flatten, spatial_shapes, level_start_index, valid_ratios, lvl_pos_embed_flatten, mask_flatten)\n        # import ipdb; ipdb.set_trace()\n\n        # prepare input for decoder\n        bs, _, c = memory.shape\n        if self.two_stage:\n            output_memory, output_proposals = self.gen_encoder_output_proposals(memory, mask_flatten, spatial_shapes)\n\n            # hack implementation for two-stage Deformable DETR\n            enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n            enc_outputs_coord_unact = self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n\n            topk = self.two_stage_num_proposals\n            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]\n            topk_coords_unact = torch.gather(enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4))\n            topk_coords_unact = topk_coords_unact.detach()\n            reference_points = topk_coords_unact.sigmoid()\n            init_reference_out = reference_points\n            pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))\n            query_embed, tgt = torch.split(pos_trans_out, c, dim=2)\n        elif self.use_dab:\n            reference_points = query_embed[..., self.d_model:].sigmoid() \n            tgt = query_embed[..., :self.d_model]\n            tgt = tgt.unsqueeze(0).expand(bs, -1, -1)\n            init_reference_out = reference_points\n        else:\n            query_embed, tgt = torch.split(query_embed, c, dim=1)\n            query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)\n            tgt = tgt.unsqueeze(0).expand(bs, -1, -1)\n            reference_points = self.reference_points(query_embed).sigmoid() \n                # bs, num_quires, 2\n            init_reference_out = reference_points\n\n        # decoder\n        # import ipdb; ipdb.set_trace()\n        hs, inter_references = self.decoder(tgt, reference_points, memory,\n                                            spatial_shapes, level_start_index, valid_ratios, \n                                            query_pos=query_embed if not self.use_dab else 
None, \n                                            src_padding_mask=mask_flatten)\n\n        inter_references_out = inter_references\n        if self.two_stage:\n            return hs, init_reference_out, inter_references_out, enc_outputs_class, enc_outputs_coord_unact\n        return hs, init_reference_out, inter_references_out, None, None\n\n\nclass DeformableTransformerEncoderLayer(nn.Module):\n    def __init__(self,\n                 d_model=256, d_ffn=1024,\n                 dropout=0.1, activation=\"relu\",\n                 n_levels=4, n_heads=8, n_points=4,\n                 add_channel_attention=False,\n                 use_deformable_box_attn=False,\n                 box_attn_type='roi_align',\n                 ):\n        super().__init__()\n\n        # self attention\n        if use_deformable_box_attn:\n            self.self_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)\n        else:\n            self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation, d_model=d_ffn)\n        self.dropout2 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout3 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n        # channel attention\n        self.add_channel_attention = add_channel_attention\n        if add_channel_attention:\n            self.activ_channel = _get_activation_fn('dyrelu', d_model=d_model)\n            self.norm_channel = nn.LayerNorm(d_model)\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None else tensor + pos\n\n    def forward_ffn(self, src):\n        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n        src = src + self.dropout3(src2)\n        src = self.norm2(src)\n        return src\n\n    def forward(self, src, pos, reference_points, spatial_shapes, level_start_index, key_padding_mask=None):\n        # self attention\n        # import ipdb; ipdb.set_trace()\n        src2 = self.self_attn(self.with_pos_embed(src, pos), reference_points, src, spatial_shapes, level_start_index, key_padding_mask)\n        src = src + self.dropout1(src2)\n        src = self.norm1(src)\n\n        # ffn\n        src = self.forward_ffn(src)\n\n        # channel attn\n        if self.add_channel_attention:\n            src = self.norm_channel(src + self.activ_channel(src))\n\n        return src\n\n\nclass DeformableTransformerEncoder(nn.Module):\n    def __init__(self, encoder_layer, num_layers, norm=None):\n        super().__init__()\n        if num_layers > 0:\n            self.layers = _get_clones(encoder_layer, num_layers)\n        else:\n            self.layers = []\n            del encoder_layer\n        self.num_layers = num_layers\n        self.norm = norm\n\n    @staticmethod\n    def get_reference_points(spatial_shapes, valid_ratios, device):\n        reference_points_list = []\n        for lvl, (H_, W_) in enumerate(spatial_shapes):\n\n            ref_y, ref_x = torch.meshgrid(torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),\n                                          torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device))\n            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)\n            ref_x = 
ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)\n            ref = torch.stack((ref_x, ref_y), -1)\n            reference_points_list.append(ref)\n        reference_points = torch.cat(reference_points_list, 1)\n        reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n        return reference_points\n\n    def forward(self, src, spatial_shapes, level_start_index, valid_ratios, pos=None, padding_mask=None):\n        \"\"\"\n        Input:\n            - src: [bs, sum(hi*wi), 256]\n            - spatial_shapes: h,w of each level [num_level, 2]\n            - level_start_index: [num_level] start point of level in sum(hi*wi).\n            - valid_ratios: [bs, num_level, 2]\n            - pos: pos embed for src. [bs, sum(hi*wi), 256]\n            - padding_mask: [bs, sum(hi*wi)]\n        Intermedia:\n            - reference_points: [bs, sum(hi*wi), num_lebel, 2]\n        \"\"\"\n        output = src\n        # bs, sum(hi*wi), 256\n        # import ipdb; ipdb.set_trace()\n        if self.num_layers > 0:\n            reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=src.device)\n        for _, layer in enumerate(self.layers):\n            output = layer(output, pos, reference_points, spatial_shapes, level_start_index, padding_mask)\n\n        if self.norm is not None:\n            output = self.norm(output)\n\n        return output\n\n\nclass DeformableTransformerDecoderLayer(nn.Module):\n    def __init__(self, d_model=256, d_ffn=1024,\n                 dropout=0.1, activation=\"relu\",\n                 n_levels=4, n_heads=8, n_points=4,\n                 use_deformable_box_attn=False,\n                 box_attn_type='roi_align',\n                 key_aware_type=None,\n                 decoder_sa_type='ca',\n                 module_seq=['sa', 'ca', 'ffn'],\n                 ):\n        super().__init__()\n        self.module_seq = module_seq\n        assert sorted(module_seq) == ['ca', 'ffn', 'sa']\n\n        # cross attention\n        # self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        if use_deformable_box_attn:\n            self.cross_attn = MSDeformableBoxAttention(d_model, n_levels, n_heads, n_boxes=n_points, used_func=box_attn_type)\n        else:\n            self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # self attention\n        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n        self.dropout2 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation, d_model=d_ffn, batch_dim=1)\n        self.dropout3 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout4 = nn.Dropout(dropout)\n        self.norm3 = nn.LayerNorm(d_model)\n\n        self.key_aware_type = key_aware_type\n        self.key_aware_proj = None\n        self.decoder_sa_type = decoder_sa_type\n        assert decoder_sa_type in ['sa', 'ca_label', 'ca_content']\n\n        if decoder_sa_type == 'ca_content':\n            self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n\n\n\n\n    def rm_self_attn_modules(self):\n        self.self_attn = None\n        self.dropout2 = None\n        self.norm2 = None\n\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None 
else tensor + pos\n\n    def forward_ffn(self, tgt):\n        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n        tgt = tgt + self.dropout4(tgt2)\n        tgt = self.norm3(tgt)\n        return tgt\n\n    def forward_sa(self, \n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n        # self attention\n        if self.self_attn is not None:\n            # import ipdb; ipdb.set_trace()\n            if self.decoder_sa_type == 'sa':\n                q = k = self.with_pos_embed(tgt, tgt_query_pos)\n                tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            elif self.decoder_sa_type == 'ca_label':\n                # import ipdb; ipdb.set_trace()\n                # q = self.with_pos_embed(tgt, tgt_query_pos)\n                bs = tgt.shape[1]\n                k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)\n                tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            elif self.decoder_sa_type == 'ca_content':\n                tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n                            tgt_reference_points.transpose(0, 1).contiguous(),\n                            memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n                tgt = tgt + self.dropout2(tgt2)\n                tgt = self.norm2(tgt)\n            else:\n                raise NotImplementedError(\"Unknown decoder_sa_type {}\".format(self.decoder_sa_type))\n\n        return tgt            \n\n    def forward_ca(self, \n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n        # cross attention\n        # import ipdb; ipdb.set_trace()\n        if self.key_aware_type is not None:\n\n            if self.key_aware_type == 'mean':\n                tgt = tgt + memory.mean(0, keepdim=True)\n            elif self.key_aware_type == 'proj_mean':\n                tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\n            else:\n                raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\n        tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n                               tgt_reference_points.transpose(0, 1).contiguous(),\n                               memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n        tgt = tgt + self.dropout1(tgt2)\n        tgt = self.norm1(tgt)\n\n        return tgt  \n\n    def forward(self, \n                # for tgt\n                tgt: Optional[Tensor],  # nq, bs, d_model\n                tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n                tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n                tgt_key_padding_mask: Optional[Tensor] = None,\n                tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n                # for memory\n                memory: Optional[Tensor] = None, # hw, bs, d_model\n                memory_key_padding_mask: Optional[Tensor] = None,\n                memory_level_start_index: Optional[Tensor] = None, # num_levels\n                memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n                memory_pos: Optional[Tensor] = None, # pos for memory\n\n                # sa\n                self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n                cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n            ):\n\n        for funcname in self.module_seq:\n            if funcname == 'ffn':\n                tgt = self.forward_ffn(tgt)\n            elif funcname == 'ca':\n                tgt = self.forward_ca(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n                    tgt_key_padding_mask, tgt_reference_points, \\\n                        memory, memory_key_padding_mask, memory_level_start_index, \\\n                            memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n            elif funcname == 'sa':\n                tgt = self.forward_sa(tgt, tgt_query_pos, tgt_query_sine_embed, \\\n                    tgt_key_padding_mask, tgt_reference_points, \\\n                        memory, memory_key_padding_mask, memory_level_start_index, \\\n                            memory_spatial_shapes, memory_pos, self_attn_mask, cross_attn_mask)\n            else:\n                raise ValueError('unknown funcname {}'.format(funcname))\n\n        return tgt\n\n    # def forward(self, \n    #             # for tgt\n    #             tgt: Optional[Tensor],  # nq, bs, d_model\n    #             tgt_query_pos: Optional[Tensor] = None, # pos for query. MLP(Sine(pos))\n    #             tgt_query_sine_embed: Optional[Tensor] = None, # pos for query. 
Sine(pos)\n    #             tgt_key_padding_mask: Optional[Tensor] = None,\n    #             tgt_reference_points: Optional[Tensor] = None, # nq, bs, 4\n\n    #             # for memory\n    #             memory: Optional[Tensor] = None, # hw, bs, d_model\n    #             memory_key_padding_mask: Optional[Tensor] = None,\n    #             memory_level_start_index: Optional[Tensor] = None, # num_levels\n    #             memory_spatial_shapes: Optional[Tensor] = None, # bs, num_levels, 2\n    #             memory_pos: Optional[Tensor] = None, # pos for memory\n\n    #             # sa\n    #             self_attn_mask: Optional[Tensor] = None, # mask used for self-attention\n    #             cross_attn_mask: Optional[Tensor] = None, # mask used for cross-attention\n    #         ):\n    #     \"\"\"\n    #     Input:\n    #         - tgt/tgt_query_pos: nq, bs, d_model\n    #         - \n    #     \"\"\"\n    #     assert cross_attn_mask is None\n\n    #     # self attention\n    #     if self.self_attn is not None:\n    #         # import ipdb; ipdb.set_trace()\n    #         if self.decoder_sa_type == 'sa':\n    #             q = k = self.with_pos_embed(tgt, tgt_query_pos)\n    #             tgt2 = self.self_attn(q, k, tgt, attn_mask=self_attn_mask)[0]\n    #             tgt = tgt + self.dropout2(tgt2)\n    #             tgt = self.norm2(tgt)\n    #         elif self.decoder_sa_type == 'ca_label':\n    #             # import ipdb; ipdb.set_trace()\n    #             # q = self.with_pos_embed(tgt, tgt_query_pos)\n    #             bs = tgt.shape[1]\n    #             k = v = self.label_embedding.weight[:, None, :].repeat(1, bs, 1)\n    #             tgt2 = self.self_attn(tgt, k, v, attn_mask=self_attn_mask)[0]\n    #             tgt = tgt + self.dropout2(tgt2)\n    #             tgt = self.norm2(tgt)\n    #         elif self.decoder_sa_type == 'ca_content':\n    #             tgt2 = self.self_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n    #                         tgt_reference_points.transpose(0, 1).contiguous(),\n    #                         memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n    #             tgt = tgt + self.dropout2(tgt2)\n    #             tgt = self.norm2(tgt)\n    #         else:\n    #             raise NotImplementedError(\"Unknown decoder_sa_type {}\".format(self.decoder_sa_type))\n\n\n    #     # cross attention\n    #     # import ipdb; ipdb.set_trace()\n    #     if self.key_aware_type is not None:\n    #         if self.key_aware_type == 'mean':\n    #             tgt = tgt + memory.mean(0, keepdim=True)\n    #         elif self.key_aware_type == 'proj_mean':\n    #             tgt = tgt + self.key_aware_proj(memory).mean(0, keepdim=True)\n    #         else:\n    #             raise NotImplementedError(\"Unknown key_aware_type: {}\".format(self.key_aware_type))\n    #     tgt2 = self.cross_attn(self.with_pos_embed(tgt, tgt_query_pos).transpose(0, 1),\n    #                            tgt_reference_points.transpose(0, 1).contiguous(),\n    #                            memory.transpose(0, 1), memory_spatial_shapes, memory_level_start_index, memory_key_padding_mask).transpose(0, 1)\n    #     tgt = tgt + self.dropout1(tgt2)\n    #     tgt = self.norm1(tgt)\n\n    #     # ffn\n    #     tgt = self.forward_ffn(tgt)\n\n    #     return tgt\n\n\nclass DeformableTransformerDecoder(nn.Module):\n    def __init__(self, decoder_layer, num_layers, return_intermediate=False, 
use_dab=False, d_model=256, query_dim=4):\n        super().__init__()\n        self.layers = _get_clones(decoder_layer, num_layers)\n        self.num_layers = num_layers\n        self.return_intermediate = return_intermediate\n        assert return_intermediate\n        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR\n        self.bbox_embed = None\n        self.class_embed = None\n        self.use_dab = use_dab\n        self.d_model = d_model\n        self.query_dim = query_dim\n        if use_dab:\n            self.query_scale = MLP(d_model, d_model, d_model, 2)\n            self.ref_point_head = MLP(2 * d_model, d_model, d_model, 2)\n\n\n    def forward(self, tgt, reference_points, src, src_spatial_shapes,       \n                src_level_start_index, src_valid_ratios,\n                query_pos=None, src_padding_mask=None):\n        output = tgt\n        if self.use_dab:\n            assert query_pos is None\n\n        intermediate = []\n        intermediate_reference_points = [reference_points]\n        for layer_id, layer in enumerate(self.layers):\n            # import ipdb; ipdb.set_trace()\n            if reference_points.shape[-1] == 4:\n                reference_points_input = reference_points[:, :, None] \\\n                                         * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None] # bs, nq, 4, 4\n            else:\n                assert reference_points.shape[-1] == 2\n                reference_points_input = reference_points[:, :, None] * src_valid_ratios[:, None]\n                \n            if self.use_dab:\n                # import ipdb; ipdb.set_trace()\n                query_sine_embed = gen_sineembed_for_position(reference_points_input[:, :, 0, :]) # bs, nq, 256*2 \n                raw_query_pos = self.ref_point_head(query_sine_embed) # bs, nq, 256\n                pos_scale = self.query_scale(output) if layer_id != 0 else 1\n                query_pos = pos_scale * raw_query_pos\n        \n            output = layer(output, query_pos, reference_points_input, src, src_spatial_shapes, src_level_start_index, src_padding_mask)\n\n            # hack implementation for iterative bounding box refinement\n            if self.bbox_embed is not None:\n                box_holder = self.bbox_embed(output)\n                box_holder[..., :self.query_dim] += inverse_sigmoid(reference_points)\n                new_reference_points = box_holder[..., :self.query_dim].sigmoid()\n                reference_points = new_reference_points.detach()\n                if layer_id != self.num_layers - 1:\n                    intermediate_reference_points.append(new_reference_points)\n\n            intermediate.append(output)\n\n        return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n\ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\ndef build_deforamble_transformer(args):\n    return DeformableTransformer(\n        d_model=args.hidden_dim,\n        nhead=args.nheads,\n        num_encoder_layers=args.enc_layers,\n        num_decoder_layers=args.dec_layers,\n        dim_feedforward=args.dim_feedforward,\n        dropout=args.dropout,\n        activation=\"relu\",\n        return_intermediate_dec=True,\n        num_feature_levels=args.ddetr_num_feature_levels,\n        dec_n_points=args.ddetr_dec_n_points,\n        enc_n_points=args.ddetr_enc_n_points,\n        two_stage=args.ddetr_two_stage,\n        
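# keep one encoder proposal per decoder query in two-stage mode
        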
two_stage_num_proposals=args.num_queries,\n        use_dab=args.ddetr_use_dab,\n        high_dim_query_update=args.ddetr_high_dim_query_update,\n        no_sine_embed=args.ddetr_no_sine_embed)\n\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/__init__.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/box_loss.py",
    "content": "# borrow from https://github.com/Zzh-tju/CIoU/blob/master/layers/modules/multibox_loss.py\n\nimport torch, math\n\n\n\ndef ciou(bboxes1, bboxes2):\n    bboxes1 = torch.sigmoid(bboxes1)\n    bboxes2 = torch.sigmoid(bboxes2)\n    rows = bboxes1.shape[0]\n    cols = bboxes2.shape[0]\n    cious = torch.zeros((rows, cols))\n    if rows * cols == 0:\n        return cious\n    exchange = False\n    if bboxes1.shape[0] > bboxes2.shape[0]:\n        bboxes1, bboxes2 = bboxes2, bboxes1\n        cious = torch.zeros((cols, rows))\n        exchange = True\n    w1 = torch.exp(bboxes1[:, 2])\n    h1 = torch.exp(bboxes1[:, 3])\n    w2 = torch.exp(bboxes2[:, 2])\n    h2 = torch.exp(bboxes2[:, 3])\n    area1 = w1 * h1\n    area2 = w2 * h2\n    center_x1 = bboxes1[:, 0]\n    center_y1 = bboxes1[:, 1]\n    center_x2 = bboxes2[:, 0]\n    center_y2 = bboxes2[:, 1]\n\n    inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)\n    inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)\n    inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)\n    inter_b = torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)\n    inter_area = torch.clamp((inter_r - inter_l),min=0) * torch.clamp((inter_b - inter_t),min=0)\n\n    c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)\n    c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)\n    c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)\n    c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)\n\n    inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2\n    c_diag = torch.clamp((c_r - c_l),min=0)**2 + torch.clamp((c_b - c_t),min=0)**2\n\n    union = area1+area2-inter_area\n    u = (inter_diag) / c_diag\n    iou = inter_area / union\n    v = (4 / (math.pi ** 2)) * torch.pow((torch.atan(w2 / h2) - torch.atan(w1 / h1)), 2)\n    with torch.no_grad():\n        S = (iou>0.5).float()\n        alpha= S*v/(1-iou+v)\n    cious = iou - u - alpha * v\n    cious = torch.clamp(cious,min=-1.0,max = 1.0)\n    if exchange:\n        cious = cious.T\n    return 1-cious\n\ndef diou(bboxes1, bboxes2):\n    bboxes1 = torch.sigmoid(bboxes1)\n    bboxes2 = torch.sigmoid(bboxes2)\n    rows = bboxes1.shape[0]\n    cols = bboxes2.shape[0]\n    cious = torch.zeros((rows, cols))\n    if rows * cols == 0:\n        return cious\n    exchange = False\n    if bboxes1.shape[0] > bboxes2.shape[0]:\n        bboxes1, bboxes2 = bboxes2, bboxes1\n        cious = torch.zeros((cols, rows))\n        exchange = True\n    w1 = torch.exp(bboxes1[:, 2])\n    h1 = torch.exp(bboxes1[:, 3])\n    w2 = torch.exp(bboxes2[:, 2])\n    h2 = torch.exp(bboxes2[:, 3])\n    area1 = w1 * h1\n    area2 = w2 * h2\n    center_x1 = bboxes1[:, 0]\n    center_y1 = bboxes1[:, 1]\n    center_x2 = bboxes2[:, 0]\n    center_y2 = bboxes2[:, 1]\n\n    inter_l = torch.max(center_x1 - w1 / 2,center_x2 - w2 / 2)\n    inter_r = torch.min(center_x1 + w1 / 2,center_x2 + w2 / 2)\n    inter_t = torch.max(center_y1 - h1 / 2,center_y2 - h2 / 2)\n    inter_b = torch.min(center_y1 + h1 / 2,center_y2 + h2 / 2)\n    inter_area = torch.clamp((inter_r - inter_l),min=0) * torch.clamp((inter_b - inter_t),min=0)\n\n    c_l = torch.min(center_x1 - w1 / 2,center_x2 - w2 / 2)\n    c_r = torch.max(center_x1 + w1 / 2,center_x2 + w2 / 2)\n    c_t = torch.min(center_y1 - h1 / 2,center_y2 - h2 / 2)\n    c_b = torch.max(center_y1 + h1 / 2,center_y2 + h2 / 2)\n\n    inter_diag = (center_x2 - center_x1)**2 + (center_y2 - center_y1)**2\n    c_diag = torch.clamp((c_r - c_l),min=0)**2 + torch.clamp((c_b - c_t),min=0)**2\n\n    
union = area1+area2-inter_area\n    u = (inter_diag) / c_diag\n    iou = inter_area / union\n    dious = iou - u\n    dious = torch.clamp(dious,min=-1.0,max = 1.0)\n    if exchange:\n        dious = dious.T\n    return 1-dious\n\n\nif __name__ == \"__main__\":\n    # quick smoke test on random (pre-sigmoid) boxes\n    x = torch.rand(10, 4)\n    y = torch.rand(10, 4)\n    cxy = ciou(x, y)\n    dxy = diou(x, y)\n    print(cxy.shape, dxy.shape)\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/box_ops.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nUtilities for bounding box manipulation and GIoU.\n\"\"\"\nimport torch, os\nfrom torchvision.ops.boxes import box_area\n\n\ndef box_cxcywh_to_xyxy(x):\n    x_c, y_c, w, h = x.unbind(-1)\n    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),\n         (x_c + 0.5 * w), (y_c + 0.5 * h)]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n    x0, y0, x1, y1 = x.unbind(-1)\n    b = [(x0 + x1) / 2, (y0 + y1) / 2,\n         (x1 - x0), (y1 - y0)]\n    return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\ndef box_iou(boxes1, boxes2):\n    area1 = box_area(boxes1)\n    area2 = box_area(boxes2)\n\n    # import ipdb; ipdb.set_trace()\n    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]\n    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]\n\n    wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]\n\n    union = area1[:, None] + area2 - inter\n\n    iou = inter / (union + 1e-6)\n    return iou, union\n\n\ndef generalized_box_iou(boxes1, boxes2):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    The boxes should be in [x0, y0, x1, y1] format\n\n    Returns a [N, M] pairwise matrix, where N = len(boxes1)\n    and M = len(boxes2)\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    # except:\n    #     import ipdb; ipdb.set_trace()\n    iou, union = box_iou(boxes1, boxes2)\n\n    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n\n    wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    area = wh[:, :, 0] * wh[:, :, 1]\n\n    return iou - (area - union) / (area + 1e-6)\n\n\n\n# modified from torchvision to also return the union\ndef box_iou_pairwise(boxes1, boxes2):\n    area1 = box_area(boxes1)\n    area2 = box_area(boxes2)\n\n    lt = torch.max(boxes1[:, :2], boxes2[:, :2])  # [N,2]\n    rb = torch.min(boxes1[:, 2:], boxes2[:, 2:])  # [N,2]\n\n    wh = (rb - lt).clamp(min=0)  # [N,2]\n    inter = wh[:, 0] * wh[:, 1]  # [N]\n\n    union = area1 + area2 - inter\n\n    iou = inter / union\n    return iou, union\n\n\ndef generalized_box_iou_pairwise(boxes1, boxes2):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    Input:\n        - boxes1, boxes2: N,4\n    Output:\n        - giou: N, 4\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    assert boxes1.shape == boxes2.shape\n    iou, union = box_iou_pairwise(boxes1, boxes2) # N, 4\n\n    lt = torch.min(boxes1[:, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, 2:], boxes2[:, 2:])\n\n    wh = (rb - lt).clamp(min=0)  # [N,2]\n    area = wh[:, 0] * wh[:, 1]\n\n    return iou - (area - union) / area\n\ndef masks_to_boxes(masks):\n    \"\"\"Compute the bounding boxes around the provided masks\n\n    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n    Returns a [N, 4] tensors, with the boxes in xyxy format\n    \"\"\"\n    if masks.numel() == 0:\n        return torch.zeros((0, 4), device=masks.device)\n\n    h, w = masks.shape[-2:]\n\n    y = torch.arange(0, h, dtype=torch.float)\n    x = torch.arange(0, w, dtype=torch.float)\n    y, x 
= torch.meshgrid(y, x)\n\n    x_mask = (masks * x.unsqueeze(0))\n    x_max = x_mask.flatten(1).max(-1)[0]\n    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    y_mask = (masks * y.unsqueeze(0))\n    y_max = y_mask.flatten(1).max(-1)[0]\n    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    return torch.stack([x_min, y_min, x_max, y_max], 1)\n\nif __name__ == '__main__':\n    # quick smoke test with random boxes\n    x = torch.rand(5, 4)\n    y = torch.rand(3, 4)\n    iou, union = box_iou(x, y)\n    print(iou.shape, union.shape)\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/coco_id2name.json",
    "content": "{\"1\": \"person\", \"2\": \"bicycle\", \"3\": \"car\", \"4\": \"motorcycle\", \"5\": \"airplane\", \"6\": \"bus\", \"7\": \"train\", \"8\": \"truck\", \"9\": \"boat\", \"10\": \"traffic light\", \"11\": \"fire hydrant\", \"13\": \"stop sign\", \"14\": \"parking meter\", \"15\": \"bench\", \"16\": \"bird\", \"17\": \"cat\", \"18\": \"dog\", \"19\": \"horse\", \"20\": \"sheep\", \"21\": \"cow\", \"22\": \"elephant\", \"23\": \"bear\", \"24\": \"zebra\", \"25\": \"giraffe\", \"27\": \"backpack\", \"28\": \"umbrella\", \"31\": \"handbag\", \"32\": \"tie\", \"33\": \"suitcase\", \"34\": \"frisbee\", \"35\": \"skis\", \"36\": \"snowboard\", \"37\": \"sports ball\", \"38\": \"kite\", \"39\": \"baseball bat\", \"40\": \"baseball glove\", \"41\": \"skateboard\", \"42\": \"surfboard\", \"43\": \"tennis racket\", \"44\": \"bottle\", \"46\": \"wine glass\", \"47\": \"cup\", \"48\": \"fork\", \"49\": \"knife\", \"50\": \"spoon\", \"51\": \"bowl\", \"52\": \"banana\", \"53\": \"apple\", \"54\": \"sandwich\", \"55\": \"orange\", \"56\": \"broccoli\", \"57\": \"carrot\", \"58\": \"hot dog\", \"59\": \"pizza\", \"60\": \"donut\", \"61\": \"cake\", \"62\": \"chair\", \"63\": \"couch\", \"64\": \"potted plant\", \"65\": \"bed\", \"67\": \"dining table\", \"70\": \"toilet\", \"72\": \"tv\", \"73\": \"laptop\", \"74\": \"mouse\", \"75\": \"remote\", \"76\": \"keyboard\", \"77\": \"cell phone\", \"78\": \"microwave\", \"79\": \"oven\", \"80\": \"toaster\", \"81\": \"sink\", \"82\": \"refrigerator\", \"84\": \"book\", \"85\": \"clock\", \"86\": \"vase\", \"87\": \"scissors\", \"88\": \"teddy bear\", \"89\": \"hair drier\", \"90\": \"toothbrush\"}"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/get_param_dicts.py",
    "content": "import json\nimport torch\nimport torch.nn as nn\n\n\ndef match_name_keywords(n: str, name_keywords: list):\n    out = False\n    for b in name_keywords:\n        if b in n:\n            out = True\n            break\n    return out\n\n\ndef get_param_dict(args, model_without_ddp: nn.Module):\n    try:\n        param_dict_type = args.param_dict_type\n    except:\n        param_dict_type = 'default'\n    assert param_dict_type in ['default', 'ddetr_in_mmdet', 'large_wd']\n\n    # by default\n    if param_dict_type == 'default':\n        param_dicts = [\n            {\"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" not in n and p.requires_grad]},\n            {\n                \"params\": [p for n, p in model_without_ddp.named_parameters() if \"backbone\" in n and p.requires_grad],\n                \"lr\": args.lr_backbone,\n            }\n        ]\n        return param_dicts\n\n    if param_dict_type == 'ddetr_in_mmdet':\n        param_dicts = [\n            {\n                \"params\":\n                    [p for n, p in model_without_ddp.named_parameters()\n                        if not match_name_keywords(n, args.lr_backbone_names) and not match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n                \"lr\": args.lr,\n            },\n            {\n                \"params\": [p for n, p in model_without_ddp.named_parameters() \n                        if match_name_keywords(n, args.lr_backbone_names) and p.requires_grad],\n                \"lr\": args.lr_backbone,\n            },\n            {\n                \"params\": [p for n, p in model_without_ddp.named_parameters() \n                        if match_name_keywords(n, args.lr_linear_proj_names) and p.requires_grad],\n                \"lr\": args.lr * args.lr_linear_proj_mult,\n            }\n        ]        \n        return param_dicts\n\n    if param_dict_type == 'large_wd':\n        param_dicts = [\n                {\n                    \"params\":\n                        [p for n, p in model_without_ddp.named_parameters()\n                            if not match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n                },\n                {\n                    \"params\": [p for n, p in model_without_ddp.named_parameters() \n                            if match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n                    \"lr\": args.lr_backbone,\n                    \"weight_decay\": 0.0,\n                },\n                {\n                    \"params\": [p for n, p in model_without_ddp.named_parameters() \n                            if match_name_keywords(n, ['backbone']) and not match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n                    \"lr\": args.lr_backbone,\n                    \"weight_decay\": args.weight_decay,\n                },\n                {\n                    \"params\":\n                        [p for n, p in model_without_ddp.named_parameters()\n                            if not match_name_keywords(n, ['backbone']) and match_name_keywords(n, ['norm', 'bias']) and p.requires_grad],\n                    \"lr\": args.lr,\n                    \"weight_decay\": 0.0,\n                }\n            ]\n\n        # print(\"param_dicts: {}\".format(param_dicts))\n\n    return param_dicts"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/logger.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport functools\nimport logging\nimport os\nimport sys\nfrom termcolor import colored\n\n\nclass _ColorfulFormatter(logging.Formatter):\n    def __init__(self, *args, **kwargs):\n        self._root_name = kwargs.pop(\"root_name\") + \".\"\n        self._abbrev_name = kwargs.pop(\"abbrev_name\", \"\")\n        if len(self._abbrev_name):\n            self._abbrev_name = self._abbrev_name + \".\"\n        super(_ColorfulFormatter, self).__init__(*args, **kwargs)\n\n    def formatMessage(self, record):\n        record.name = record.name.replace(self._root_name, self._abbrev_name)\n        log = super(_ColorfulFormatter, self).formatMessage(record)\n        if record.levelno == logging.WARNING:\n            prefix = colored(\"WARNING\", \"red\", attrs=[\"blink\"])\n        elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:\n            prefix = colored(\"ERROR\", \"red\", attrs=[\"blink\", \"underline\"])\n        else:\n            return log\n        return prefix + \" \" + log\n\n\n# so that calling setup_logger multiple times won't add many handlers\n@functools.lru_cache()\ndef setup_logger(\n    output=None, distributed_rank=0, *, color=True, name=\"imagenet\", abbrev_name=None\n):\n    \"\"\"\n    Initialize the detectron2 logger and set its verbosity level to \"INFO\".\n\n    Args:\n        output (str): a file name or a directory to save log. If None, will not save log file.\n            If ends with \".txt\" or \".log\", assumed to be a file name.\n            Otherwise, logs will be saved to `output/log.txt`.\n        name (str): the root module name of this logger\n\n    Returns:\n        logging.Logger: a logger\n    \"\"\"\n    logger = logging.getLogger(name)\n    logger.setLevel(logging.DEBUG)\n    logger.propagate = False\n\n    if abbrev_name is None:\n        abbrev_name = name\n\n    plain_formatter = logging.Formatter(\n        '[%(asctime)s.%(msecs)03d]: %(message)s',\n        datefmt='%m/%d %H:%M:%S'\n    )\n    # stdout logging: master only\n    if distributed_rank == 0:\n        ch = logging.StreamHandler(stream=sys.stdout)\n        ch.setLevel(logging.DEBUG)\n        if color:\n            formatter = _ColorfulFormatter(\n                colored(\"[%(asctime)s.%(msecs)03d]: \", \"green\") + \"%(message)s\",\n                datefmt=\"%m/%d %H:%M:%S\",\n                root_name=name,\n                abbrev_name=str(abbrev_name),\n            )\n        else:\n            formatter = plain_formatter\n        ch.setFormatter(formatter)\n        logger.addHandler(ch)\n\n    # file logging: all workers\n    if output is not None:\n        if output.endswith(\".txt\") or output.endswith(\".log\"):\n            filename = output\n        else:\n            filename = os.path.join(output, \"log.txt\")\n        if distributed_rank > 0:\n            filename = filename + f\".rank{distributed_rank}\"\n        os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n        fh = logging.StreamHandler(_cached_log_stream(filename))\n        fh.setLevel(logging.DEBUG)\n        fh.setFormatter(plain_formatter)\n        logger.addHandler(fh)\n\n    return logger\n\n\n# cache the opened file object, so that different calls to `setup_logger`\n# with the same file name can safely write to the same file.\n@functools.lru_cache(maxsize=None)\ndef _cached_log_stream(filename):\n    return open(filename, \"a\")\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/misc.py",
    "content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport os\nimport random \nimport subprocess\nimport time\nfrom collections import OrderedDict, defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport json, time\nimport numpy as np\nimport torch\nimport torch.distributed as dist\nfrom torch import Tensor\n\nimport colorsys\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\n__torchvision_need_compat_flag = float(torchvision.__version__.split('.')[1]) < 7\nif __torchvision_need_compat_flag:\n    from torchvision.ops import _new_empty_tensor\n    from torchvision.ops.misc import _output_size\n\n\nclass SmoothedValue(object):\n    \"\"\"Track a series of values and provide access to smoothed values over a\n    window or the global series average.\n    \"\"\"\n\n    def __init__(self, window_size=20, fmt=None):\n        if fmt is None:\n            fmt = \"{median:.4f} ({global_avg:.4f})\"\n        self.deque = deque(maxlen=window_size)\n        self.total = 0.0\n        self.count = 0\n        self.fmt = fmt\n\n    def update(self, value, n=1):\n        self.deque.append(value)\n        self.count += n\n        self.total += value * n\n\n    def synchronize_between_processes(self):\n        \"\"\"\n        Warning: does not synchronize the deque!\n        \"\"\"\n        if not is_dist_avail_and_initialized():\n            return\n        t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')\n        dist.barrier()\n        dist.all_reduce(t)\n        t = t.tolist()\n        self.count = int(t[0])\n        self.total = t[1]\n\n    @property\n    def median(self):\n        d = torch.tensor(list(self.deque))\n        if d.shape[0] == 0:\n            return 0\n        return d.median().item()\n\n    @property\n    def avg(self):\n        d = torch.tensor(list(self.deque), dtype=torch.float32)\n        return d.mean().item()\n\n    @property\n    def global_avg(self):\n        return self.total / self.count\n\n    @property\n    def max(self):\n        return max(self.deque)\n\n    @property\n    def value(self):\n        return self.deque[-1]\n\n    def __str__(self):\n        return self.fmt.format(\n            median=self.median,\n            avg=self.avg,\n            global_avg=self.global_avg,\n            max=self.max,\n            value=self.value)\n\n\ndef all_gather(data):\n    \"\"\"\n    Run all_gather on arbitrary picklable data (not necessarily tensors)\n    Args:\n        data: any picklable object\n    Returns:\n        list[data]: list of data gathered from each rank\n    \"\"\"\n    world_size = get_world_size()\n    if world_size == 1:\n        return [data]\n\n    # serialized to a Tensor\n    buffer = pickle.dumps(data)\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n    # obtain Tensor size of each rank\n    local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n    size_list = [torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n    dist.all_gather(size_list, local_size)\n    size_list = [int(size.item()) for size in size_list]\n    max_size = max(size_list)\n\n    # receiving Tensor from all ranks\n    # we pad the tensor because torch all_gather does not support\n    # gathering tensors of different shapes\n    tensor_list = []\n    for _ in 
size_list:\n        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n    if local_size != max_size:\n        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\")\n        tensor = torch.cat((tensor, padding), dim=0)\n    dist.all_gather(tensor_list, tensor)\n\n    data_list = []\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n    \"\"\"\n    Args:\n        input_dict (dict): all the values will be reduced\n        average (bool): whether to do average or sum\n    Reduce the values in the dictionary from all processes so that all processes\n    have the averaged results. Returns a dict with the same fields as\n    input_dict, after reduction.\n    \"\"\"\n    world_size = get_world_size()\n    if world_size < 2:\n        return input_dict\n    with torch.no_grad():\n        names = []\n        values = []\n        # sort the keys so that they are consistent across processes\n        for k in sorted(input_dict.keys()):\n            names.append(k)\n            values.append(input_dict[k])\n        values = torch.stack(values, dim=0)\n        dist.all_reduce(values)\n        if average:\n            values /= world_size\n        reduced_dict = {k: v for k, v in zip(names, values)}\n    return reduced_dict\n\n\nclass MetricLogger(object):\n    def __init__(self, delimiter=\"\\t\"):\n        self.meters = defaultdict(SmoothedValue)\n        self.delimiter = delimiter\n\n    def update(self, **kwargs):\n        for k, v in kwargs.items():\n            if isinstance(v, torch.Tensor):\n                v = v.item()\n            assert isinstance(v, (float, int))\n            self.meters[k].update(v)\n\n    def __getattr__(self, attr):\n        if attr in self.meters:\n            return self.meters[attr]\n        if attr in self.__dict__:\n            return self.__dict__[attr]\n        raise AttributeError(\"'{}' object has no attribute '{}'\".format(\n            type(self).__name__, attr))\n\n    def __str__(self):\n        loss_str = []\n        for name, meter in self.meters.items():\n            # print(name, str(meter))\n            # import ipdb;ipdb.set_trace()\n            if meter.count > 0:\n                loss_str.append(\n                    \"{}: {}\".format(name, str(meter))\n                )\n        return self.delimiter.join(loss_str)\n\n    def synchronize_between_processes(self):\n        for meter in self.meters.values():\n            meter.synchronize_between_processes()\n\n    def add_meter(self, name, meter):\n        self.meters[name] = meter\n\n    def log_every(self, iterable, print_freq, header=None, logger=None):\n        if logger is None:\n            print_func = print\n        else:\n            print_func = logger.info\n\n        i = 0\n        if not header:\n            header = ''\n        start_time = time.time()\n        end = time.time()\n        iter_time = SmoothedValue(fmt='{avg:.4f}')\n        data_time = SmoothedValue(fmt='{avg:.4f}')\n        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'\n        if torch.cuda.is_available():\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}',\n                'max mem: 
{memory:.0f}'\n            ])\n        else:\n            log_msg = self.delimiter.join([\n                header,\n                '[{0' + space_fmt + '}/{1}]',\n                'eta: {eta}',\n                '{meters}',\n                'time: {time}',\n                'data: {data}'\n            ])\n        MB = 1024.0 * 1024.0\n        for obj in iterable:\n            data_time.update(time.time() - end)\n            yield obj\n            # import ipdb; ipdb.set_trace()\n            iter_time.update(time.time() - end)\n            if i % print_freq == 0 or i == len(iterable) - 1:\n                eta_seconds = iter_time.global_avg * (len(iterable) - i)\n                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n                if torch.cuda.is_available():\n                    print_func(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time),\n                        memory=torch.cuda.max_memory_allocated() / MB))\n                else:\n                    print_func(log_msg.format(\n                        i, len(iterable), eta=eta_string,\n                        meters=str(self),\n                        time=str(iter_time), data=str(data_time)))\n            i += 1\n            end = time.time()\n        total_time = time.time() - start_time\n        total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n        print_func('{} Total time: {} ({:.4f} s / it)'.format(\n            header, total_time_str, total_time / len(iterable)))\n\n\ndef get_sha():\n    cwd = os.path.dirname(os.path.abspath(__file__))\n\n    def _run(command):\n        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()\n    sha = 'N/A'\n    diff = \"clean\"\n    branch = 'N/A'\n    try:\n        sha = _run(['git', 'rev-parse', 'HEAD'])\n        subprocess.check_output(['git', 'diff'], cwd=cwd)\n        diff = _run(['git', 'diff-index', 'HEAD'])\n        diff = \"has uncommited changes\" if diff else \"clean\"\n        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])\n    except Exception:\n        pass\n    message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n    return message\n\n\ndef collate_fn(batch):\n    # import ipdb; ipdb.set_trace()\n    batch = list(zip(*batch))\n    batch[0] = nested_tensor_from_tensor_list(batch[0])\n    return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n    # type: (List[List[int]]) -> List[int]\n    maxes = the_list[0]\n    for sublist in the_list[1:]:\n        for index, item in enumerate(sublist):\n            maxes[index] = max(maxes[index], item)\n    return maxes\n\n\nclass NestedTensor(object):\n    def __init__(self, tensors, mask: Optional[Tensor]):\n        self.tensors = tensors\n        self.mask = mask\n        if mask == 'auto':\n            self.mask = torch.zeros_like(tensors).to(tensors.device)\n            if self.mask.dim() == 3:\n                self.mask = self.mask.sum(0).to(bool)\n            elif self.mask.dim() == 4:\n                self.mask = self.mask.sum(1).to(bool)\n            else:\n                raise ValueError(\"tensors dim must be 3 or 4 but {}({})\".format(self.tensors.dim(), self.tensors.shape))\n\n    def imgsize(self):\n        res = []\n        for i in range(self.tensors.shape[0]):\n            mask = self.mask[i]\n            maxH = (~mask).sum(0).max()\n            maxW = (~mask).sum(1).max()\n            
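# un-padded (valid) height/width for sample i; mask is True on padded pixels\n            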
res.append(torch.Tensor([maxH, maxW]))\n        return res\n\n    def to(self, device):\n        # type: (Device) -> NestedTensor # noqa\n        cast_tensor = self.tensors.to(device)\n        mask = self.mask\n        if mask is not None:\n            assert mask is not None\n            cast_mask = mask.to(device)\n        else:\n            cast_mask = None\n        return NestedTensor(cast_tensor, cast_mask)\n\n    def to_img_list_single(self, tensor, mask):\n        assert tensor.dim() == 3, \"dim of tensor should be 3 but {}\".format(tensor.dim())\n        maxH = (~mask).sum(0).max()\n        maxW = (~mask).sum(1).max()\n        img = tensor[:, :maxH, :maxW]\n        return img\n\n    def to_img_list(self):\n        \"\"\"remove the padding and convert to img list\n\n        Returns:\n            [type]: [description]\n        \"\"\"\n        if self.tensors.dim() == 3:\n            return self.to_img_list_single(self.tensors, self.mask)\n        else:\n            res = []\n            for i in range(self.tensors.shape[0]):\n                tensor_i = self.tensors[i]\n                mask_i = self.mask[i]\n                res.append(self.to_img_list_single(tensor_i, mask_i))\n            return res\n\n    @property\n    def device(self):\n        return self.tensors.device\n\n    def decompose(self):\n        return self.tensors, self.mask\n\n    def __repr__(self):\n        return str(self.tensors)\n\n    @property\n    def shape(self):\n        return {\n            'tensors.shape': self.tensors.shape,\n            'mask.shape': self.mask.shape\n        }\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n    # TODO make this more general\n    if tensor_list[0].ndim == 3:\n        if torchvision._is_tracing():\n            # nested_tensor_from_tensor_list() does not export well to ONNX\n            # call _onnx_nested_tensor_from_tensor_list() instead\n            return _onnx_nested_tensor_from_tensor_list(tensor_list)\n\n        # TODO make it support different-sized images\n        max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n        batch_shape = [len(tensor_list)] + max_size\n        b, c, h, w = batch_shape\n        dtype = tensor_list[0].dtype\n        device = tensor_list[0].device\n        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n        for img, pad_img, m in zip(tensor_list, tensor, mask):\n            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n            m[: img.shape[1], :img.shape[2]] = False\n    else:\n        raise ValueError('not supported')\n    return NestedTensor(tensor, mask)\n\n\n# _onnx_nested_tensor_from_tensor_list() is an implementation of\n# nested_tensor_from_tensor_list() that is supported by ONNX tracing.\n@torch.jit.unused\ndef _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:\n    max_size = []\n    for i in range(tensor_list[0].dim()):\n        max_size_i = torch.max(torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)).to(torch.int64)\n        max_size.append(max_size_i)\n    max_size = tuple(max_size)\n\n    # work around for\n    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n    # m[: img.shape[1], :img.shape[2]] = False\n    # which is not yet supported in onnx\n    padded_imgs = []\n    padded_masks = []\n    for img in 
tensor_list:\n        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]\n        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))\n        padded_imgs.append(padded_img)\n\n        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)\n        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), \"constant\", 1)\n        padded_masks.append(padded_mask.to(torch.bool))\n\n    tensor = torch.stack(padded_imgs)\n    mask = torch.stack(padded_masks)\n\n    return NestedTensor(tensor, mask=mask)\n\n\ndef setup_for_distributed(is_master):\n    \"\"\"\n    This function disables printing when not in master process\n    \"\"\"\n    import builtins as __builtin__\n    builtin_print = __builtin__.print\n\n    def print(*args, **kwargs):\n        force = kwargs.pop('force', False)\n        if is_master or force:\n            builtin_print(*args, **kwargs)\n\n    __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n    if not dist.is_available():\n        return False\n    if not dist.is_initialized():\n        return False\n    return True\n\n\ndef get_world_size():\n    if not is_dist_avail_and_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef get_rank():\n    if not is_dist_avail_and_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef is_main_process():\n    return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n    if is_main_process():\n        torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n    if 'WORLD_SIZE' in os.environ and os.environ['WORLD_SIZE'] != '': # 'RANK' in os.environ and \n        # args.rank = int(os.environ[\"RANK\"])\n        # args.world_size = int(os.environ['WORLD_SIZE'])\n        # args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])\n\n        # launch by torch.distributed.launch\n        # Single node\n        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 1 --rank 0 ...\n        # Multi nodes\n        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 0 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...\n        #   python -m torch.distributed.launch --nproc_per_node=8 main.py --world-size 2 --rank 1 --dist-url 'tcp://IP_OF_NODE0:FREEPORT' ...\n\n        local_world_size = int(os.environ['WORLD_SIZE'])\n        args.world_size = args.world_size * local_world_size\n        args.gpu = args.local_rank = int(os.environ['LOCAL_RANK'])\n        args.rank = args.rank * local_world_size + args.local_rank\n        print('world size: {}, rank: {}, local rank: {}'.format(args.world_size, args.rank, args.local_rank))\n        print(json.dumps(dict(os.environ), indent=2))\n    elif 'SLURM_PROCID' in os.environ:\n        args.rank = int(os.environ['SLURM_PROCID'])\n        args.gpu = args.local_rank = int(os.environ['SLURM_LOCALID'])\n        args.world_size = int(os.environ['SLURM_NPROCS'])\n\n        print('world size: {}, world rank: {}, local rank: {}, device_count: {}'.format(args.world_size, args.rank, args.local_rank, torch.cuda.device_count()))\n    else:\n        print('Not using distributed mode')\n        args.distributed = False\n        args.world_size = 1\n        args.rank = 0\n        args.local_rank = 0\n        return\n\n    print(\"world_size:{} rank:{} local_rank:{}\".format(args.world_size, args.rank, args.local_rank))\n    args.distributed = True\n    torch.cuda.set_device(args.local_rank)\n    args.dist_backend = 
'nccl'\n    print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)\n    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,\n                                         world_size=args.world_size, rank=args.rank)\n    print(\"Before torch.distributed.barrier()\")\n    torch.distributed.barrier()\n    print(\"End torch.distributed.barrier()\")\n    setup_for_distributed(args.rank == 0)\n\n\n@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    if target.numel() == 0:\n        return [torch.zeros([], device=output.device)]\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n    return res\n\n\ndef interpolate(input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None):\n    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n    \"\"\"\n    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n    This will eventually be supported natively by PyTorch, and this\n    class can go away.\n    \"\"\"\n    if __torchvision_need_compat_flag < 0.7:\n        if input.numel() > 0:\n            return torch.nn.functional.interpolate(\n                input, size, scale_factor, mode, align_corners\n            )\n\n        output_shape = _output_size(2, input, size, scale_factor)\n        output_shape = list(input.shape[:-2]) + list(output_shape)\n        return _new_empty_tensor(input, output_shape)\n    else:\n        return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)\n\n\n\nclass color_sys():\n    def __init__(self, num_colors) -> None:\n        self.num_colors = num_colors\n        colors=[]\n        for i in np.arange(0., 360., 360. / num_colors):\n            hue = i/360.\n            lightness = (50 + np.random.rand() * 10)/100.\n            saturation = (90 + np.random.rand() * 10)/100.\n            colors.append(tuple([int(j*255) for j in colorsys.hls_to_rgb(hue, lightness, saturation)]))\n        self.colors = colors\n\n    def __call__(self, idx):\n        return self.colors[idx]\n\ndef inverse_sigmoid(x, eps=1e-3):\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1/x2)\n\ndef clean_state_dict(state_dict):\n    new_state_dict = OrderedDict()\n    for k, v in state_dict.items():\n        if k[:7] == 'module.':\n            k = k[7:]  # remove `module.`\n        new_state_dict[k] = v\n    return new_state_dict"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/plot_utils.py",
    "content": "\"\"\"\nPlotting utilities to visualize training logs.\n\"\"\"\nimport torch\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path, PurePath\n\n\ndef plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):\n    '''\n    Function to plot specific fields from training log(s). Plots both training and test results.\n\n    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file\n              - fields = which results to plot from each log file - plots both training and test for each field.\n              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots\n              - log_name = optional, name of log file if different than default 'log.txt'.\n\n    :: Outputs - matplotlib plots of results in fields, color coded for each log file.\n               - solid lines are training results, dashed lines are test results.\n\n    '''\n    func_name = \"plot_utils.py::plot_logs\"\n\n    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,\n    # convert single Path to list to avoid 'not iterable' error\n\n    if not isinstance(logs, list):\n        if isinstance(logs, PurePath):\n            logs = [logs]\n            print(f\"{func_name} info: logs param expects a list argument, converted to list[Path].\")\n        else:\n            raise ValueError(f\"{func_name} - invalid argument for logs parameter.\\n \\\n            Expect list[Path] or single Path obj, received {type(logs)}\")\n\n    # Quality checks - verify valid dir(s), that every item in list is Path object, and that log_name exists in each dir\n    for i, dir in enumerate(logs):\n        if not isinstance(dir, PurePath):\n            raise ValueError(f\"{func_name} - non-Path object in logs argument of {type(dir)}: \\n{dir}\")\n        if not dir.exists():\n            raise ValueError(f\"{func_name} - invalid directory in logs argument:\\n{dir}\")\n        # verify log_name exists\n        fn = Path(dir / log_name)\n        if not fn.exists():\n            print(f\"-> missing {log_name}.  
Have you gotten to Epoch 1 in training?\")\n            print(f\"--> full path of missing log file: {fn}\")\n            return\n\n    # load log file(s) and plot\n    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]\n\n    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))\n\n    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):\n        for j, field in enumerate(fields):\n            if field == 'mAP':\n                coco_eval = pd.DataFrame(\n                    np.stack(df.test_coco_eval_bbox.dropna().values)[:, 1]\n                ).ewm(com=ewm_col).mean()\n                axs[j].plot(coco_eval, c=color)\n            else:\n                df.interpolate().ewm(com=ewm_col).mean().plot(\n                    y=[f'train_{field}', f'test_{field}'],\n                    ax=axs[j],\n                    color=[color] * 2,\n                    style=['-', '--']\n                )\n    for ax, field in zip(axs, fields):\n        if field == 'mAP':\n            ax.legend([Path(p).name for p in logs])\n            ax.set_title(field)\n        else:\n            ax.legend([f'train', f'test'])\n            ax.set_title(field)\n\n    return fig, axs\n\ndef plot_precision_recall(files, naming_scheme='iter'):\n    if naming_scheme == 'exp_id':\n        # name becomes exp_id\n        names = [f.parts[-3] for f in files]\n    elif naming_scheme == 'iter':\n        names = [f.stem for f in files]\n    else:\n        raise ValueError(f'not supported {naming_scheme}')\n    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))\n    for f, color, name in zip(files, sns.color_palette(\"Blues\", n_colors=len(files)), names):\n        data = torch.load(f)\n        # precision is n_iou, n_points, n_cat, n_area, max_det\n        precision = data['precision']\n        recall = data['params'].recThrs\n        scores = data['scores']\n        # take precision for all classes, all areas and 100 detections\n        precision = precision[0, :, :, 0, -1].mean(1)\n        scores = scores[0, :, :, 0, -1].mean(1)\n        prec = precision.mean()\n        rec = data['recall'][0, :, 0, -1].mean()\n        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +\n              f'score={scores.mean():0.3f}, ' +\n              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'\n              )\n        axs[0].plot(recall, precision, c=color)\n        axs[1].plot(recall, scores, c=color)\n\n    axs[0].set_title('Precision / Recall')\n    axs[0].legend(names)\n    axs[1].set_title('Scores / Recall')\n    axs[1].legend(names)\n    return fig, axs\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/slconfig.py",
    "content": "# ==========================================================\n# Modified from mmcv\n# ==========================================================\nimport os, sys\nimport os.path as osp\nimport ast\nimport tempfile\nimport shutil\nfrom importlib import import_module\n\nfrom argparse import Action\n\nfrom addict import Dict\nfrom yapf.yapflib.yapf_api import FormatCode\n\nBASE_KEY = '_base_'\nDELETE_KEY = '_delete_'\nRESERVED_KEYS = ['filename', 'text', 'pretty_text', 'get', 'dump', 'merge_from_dict']\n\n\ndef check_file_exist(filename, msg_tmpl='file \"{}\" does not exist'):\n    if not osp.isfile(filename):\n        raise FileNotFoundError(msg_tmpl.format(filename))\n\nclass ConfigDict(Dict):\n\n    def __missing__(self, name):\n        raise KeyError(name)\n\n    def __getattr__(self, name):\n        try:\n            value = super(ConfigDict, self).__getattr__(name)\n        except KeyError:\n            ex = AttributeError(f\"'{self.__class__.__name__}' object has no \"\n                                f\"attribute '{name}'\")\n        except Exception as e:\n            ex = e\n        else:\n            return value\n        raise ex\n\n\nclass SLConfig(object):\n    \"\"\"\n    config files.\n    only support .py file as config now.\n\n    ref: mmcv.utils.config\n\n    Example:\n        >>> cfg = Config(dict(a=1, b=dict(b1=[0, 1])))\n        >>> cfg.a\n        1\n        >>> cfg.b\n        {'b1': [0, 1]}\n        >>> cfg.b.b1\n        [0, 1]\n        >>> cfg = Config.fromfile('tests/data/config/a.py')\n        >>> cfg.filename\n        \"/home/kchen/projects/mmcv/tests/data/config/a.py\"\n        >>> cfg.item4\n        'test'\n        >>> cfg\n        \"Config [path: /home/kchen/projects/mmcv/tests/data/config/a.py]: \"\n        \"{'item1': [1, 2], 'item2': {'a': 0}, 'item3': True, 'item4': 'test'}\"\n    \"\"\"\n    @staticmethod\n    def _validate_py_syntax(filename):\n        with open(filename) as f:\n            content = f.read()\n        try:\n            ast.parse(content)\n        except SyntaxError:\n            raise SyntaxError('There are syntax errors in config '\n                              f'file {filename}')\n\n    @staticmethod\n    def _file2dict(filename):\n        filename = osp.abspath(osp.expanduser(filename))\n        check_file_exist(filename)\n        if filename.lower().endswith('.py'):\n            with tempfile.TemporaryDirectory() as temp_config_dir:\n                temp_config_file = tempfile.NamedTemporaryFile(\n                    dir=temp_config_dir, suffix='.py')\n                temp_config_name = osp.basename(temp_config_file.name)\n                shutil.copyfile(filename,\n                                osp.join(temp_config_dir, temp_config_name))\n                temp_module_name = osp.splitext(temp_config_name)[0]\n                sys.path.insert(0, temp_config_dir)\n                SLConfig._validate_py_syntax(filename)\n                mod = import_module(temp_module_name)\n                sys.path.pop(0)\n                cfg_dict = {\n                    name: value\n                    for name, value in mod.__dict__.items()\n                    if not name.startswith('__')\n                }\n                # delete imported module\n                del sys.modules[temp_module_name]\n                # close temp file\n                temp_config_file.close()\n        elif filename.lower().endswith(('.yml', '.yaml', '.json')):\n            from .slio import slload\n            cfg_dict = slload(filename)\n        else:\n  
          raise IOError('Only py/yml/yaml/json type are supported now!')\n\n        cfg_text = filename + '\\n'\n        with open(filename, 'r') as f:\n            cfg_text += f.read()\n\n        # parse the base file\n        if BASE_KEY in cfg_dict:\n            cfg_dir = osp.dirname(filename)\n            base_filename = cfg_dict.pop(BASE_KEY)\n            base_filename = base_filename if isinstance(\n                base_filename, list) else [base_filename]\n\n            cfg_dict_list = list()\n            cfg_text_list = list()\n            for f in base_filename:\n                _cfg_dict, _cfg_text = SLConfig._file2dict(osp.join(cfg_dir, f))\n                cfg_dict_list.append(_cfg_dict)\n                cfg_text_list.append(_cfg_text)\n\n            base_cfg_dict = dict()\n            for c in cfg_dict_list:\n                if len(base_cfg_dict.keys() & c.keys()) > 0:\n                    raise KeyError('Duplicate key is not allowed among bases')\n                    # TODO Allow the duplicate key while warnning user\n                base_cfg_dict.update(c)\n\n            base_cfg_dict = SLConfig._merge_a_into_b(cfg_dict, base_cfg_dict)\n            cfg_dict = base_cfg_dict\n\n            # merge cfg_text\n            cfg_text_list.append(cfg_text)\n            cfg_text = '\\n'.join(cfg_text_list)\n\n        return cfg_dict, cfg_text\n\n    @staticmethod\n    def _merge_a_into_b(a, b):\n        \"\"\"merge dict `a` into dict `b` (non-inplace).\n            values in `a` will overwrite `b`.\n            copy first to avoid inplace modification\n            \n        Args:\n            a ([type]): [description]\n            b ([type]): [description]\n\n        Returns:\n            [dict]: [description]\n        \"\"\"\n        # import ipdb; ipdb.set_trace()\n        if not isinstance(a, dict):\n            return a\n\n        b = b.copy()\n        for k, v in a.items():\n            if isinstance(v, dict) and k in b and not v.pop(DELETE_KEY, False):\n            \n                if not isinstance(b[k], dict) and not isinstance(b[k], list):\n                    # if :\n                    # import ipdb; ipdb.set_trace()\n                    raise TypeError(\n                        f'{k}={v} in child config cannot inherit from base '\n                        f'because {k} is a dict in the child config but is of '\n                        f'type {type(b[k])} in base config. 
You may set '\n                        f'`{DELETE_KEY}=True` to ignore the base config')\n                b[k] = SLConfig._merge_a_into_b(v, b[k])\n            elif isinstance(b, list):\n                try:\n                    _ = int(k)\n                except:\n                    raise TypeError(\n                        f'b is a list, '\n                        f'index {k} should be an int when input but {type(k)}'\n                    )\n                b[int(k)] = SLConfig._merge_a_into_b(v, b[int(k)])\n            else:   \n                b[k] = v\n                \n        return b\n\n    @staticmethod\n    def fromfile(filename):\n        cfg_dict, cfg_text = SLConfig._file2dict(filename)\n        return SLConfig(cfg_dict, cfg_text=cfg_text, filename=filename)\n\n\n    def __init__(self, cfg_dict=None, cfg_text=None, filename=None):\n        if cfg_dict is None:\n            cfg_dict = dict()\n        elif not isinstance(cfg_dict, dict):\n            raise TypeError('cfg_dict must be a dict, but '\n                            f'got {type(cfg_dict)}')\n        for key in cfg_dict:\n            if key in RESERVED_KEYS:\n                raise KeyError(f'{key} is reserved for config file')\n\n        super(SLConfig, self).__setattr__('_cfg_dict', ConfigDict(cfg_dict))\n        super(SLConfig, self).__setattr__('_filename', filename)\n        if cfg_text:\n            text = cfg_text\n        elif filename:\n            with open(filename, 'r') as f:\n                text = f.read()\n        else:\n            text = ''\n        super(SLConfig, self).__setattr__('_text', text)\n\n\n    @property\n    def filename(self):\n        return self._filename\n\n    @property\n    def text(self):\n        return self._text\n\n    @property\n    def pretty_text(self):\n\n        indent = 4\n\n        def _indent(s_, num_spaces):\n            s = s_.split('\\n')\n            if len(s) == 1:\n                return s_\n            first = s.pop(0)\n            s = [(num_spaces * ' ') + line for line in s]\n            s = '\\n'.join(s)\n            s = first + '\\n' + s\n            return s\n\n        def _format_basic_types(k, v, use_mapping=False):\n            if isinstance(v, str):\n                v_str = f\"'{v}'\"\n            else:\n                v_str = str(v)\n\n            if use_mapping:\n                k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n                attr_str = f'{k_str}: {v_str}'\n            else:\n                attr_str = f'{str(k)}={v_str}'\n            attr_str = _indent(attr_str, indent)\n\n            return attr_str\n\n        def _format_list(k, v, use_mapping=False):\n            # check if all items in the list are dict\n            if all(isinstance(_, dict) for _ in v):\n                v_str = '[\\n'\n                v_str += '\\n'.join(\n                    f'dict({_indent(_format_dict(v_), indent)}),'\n                    for v_ in v).rstrip(',')\n                if use_mapping:\n                    k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n                    attr_str = f'{k_str}: {v_str}'\n                else:\n                    attr_str = f'{str(k)}={v_str}'\n                attr_str = _indent(attr_str, indent) + ']'\n            else:\n                attr_str = _format_basic_types(k, v, use_mapping)\n            return attr_str\n\n        def _contain_invalid_identifier(dict_str):\n            contain_invalid_identifier = False\n            for key_name in dict_str:\n                contain_invalid_identifier |= \\\n    
                (not str(key_name).isidentifier())\n            return contain_invalid_identifier\n\n        def _format_dict(input_dict, outest_level=False):\n            r = ''\n            s = []\n\n            use_mapping = _contain_invalid_identifier(input_dict)\n            if use_mapping:\n                r += '{'\n            for idx, (k, v) in enumerate(input_dict.items()):\n                is_last = idx >= len(input_dict) - 1\n                end = '' if outest_level or is_last else ','\n                if isinstance(v, dict):\n                    v_str = '\\n' + _format_dict(v)\n                    if use_mapping:\n                        k_str = f\"'{k}'\" if isinstance(k, str) else str(k)\n                        attr_str = f'{k_str}: dict({v_str}'\n                    else:\n                        attr_str = f'{str(k)}=dict({v_str}'\n                    attr_str = _indent(attr_str, indent) + ')' + end\n                elif isinstance(v, list):\n                    attr_str = _format_list(k, v, use_mapping) + end\n                else:\n                    attr_str = _format_basic_types(k, v, use_mapping) + end\n\n                s.append(attr_str)\n            r += '\\n'.join(s)\n            if use_mapping:\n                r += '}'\n            return r\n\n        cfg_dict = self._cfg_dict.to_dict()\n        text = _format_dict(cfg_dict, outest_level=True)\n        # copied from setup.cfg\n        yapf_style = dict(\n            based_on_style='pep8',\n            blank_line_before_nested_class_or_def=True,\n            split_before_expression_after_opening_paren=True)\n        text, _ = FormatCode(text, style_config=yapf_style, verify=True)\n\n        return text\n    \n\n    def __repr__(self):\n        return f'Config (path: {self.filename}): {self._cfg_dict.__repr__()}'\n\n    def __len__(self):\n        return len(self._cfg_dict)\n\n    def __getattr__(self, name):\n        # # debug\n        # print('+'*15)\n        # print('name=%s' % name)\n        # print(\"addr:\", id(self))\n        # # print('type(self):', type(self))\n        # print(self.__dict__)\n        # print('+'*15)\n        # if self.__dict__ == {}:\n        #     raise ValueError\n\n        return getattr(self._cfg_dict, name)\n\n    def __getitem__(self, name):\n        return self._cfg_dict.__getitem__(name)\n\n    def __setattr__(self, name, value):\n        if isinstance(value, dict):\n            value = ConfigDict(value)\n        self._cfg_dict.__setattr__(name, value)\n\n    def __setitem__(self, name, value):\n        if isinstance(value, dict):\n            value = ConfigDict(value)\n        self._cfg_dict.__setitem__(name, value)\n\n    def __iter__(self):\n        return iter(self._cfg_dict)\n\n    def dump(self, file=None):\n        # import ipdb; ipdb.set_trace()\n        if file is None:\n            return self.pretty_text\n        else:\n            with open(file, 'w') as f:\n                f.write(self.pretty_text)\n\n    def merge_from_dict(self, options):\n        \"\"\"Merge list into cfg_dict\n\n        Merge the dict parsed by MultipleKVAction into this cfg.\n\n        Examples:\n            >>> options = {'model.backbone.depth': 50,\n            ...            'model.backbone.with_cp':True}\n            >>> cfg = Config(dict(model=dict(backbone=dict(type='ResNet'))))\n            >>> cfg.merge_from_dict(options)\n            >>> cfg_dict = super(Config, self).__getattribute__('_cfg_dict')\n            >>> assert cfg_dict == dict(\n            ...     
model=dict(backbone=dict(depth=50, with_cp=True)))\n\n        Args:\n            options (dict): dict of configs to merge from.\n        \"\"\"\n        option_cfg_dict = {}\n        for full_key, v in options.items():\n            d = option_cfg_dict\n            key_list = full_key.split('.')\n            for subkey in key_list[:-1]:\n                d.setdefault(subkey, ConfigDict())\n                d = d[subkey]\n            subkey = key_list[-1]\n            d[subkey] = v\n\n        cfg_dict = super(SLConfig, self).__getattribute__('_cfg_dict')\n        super(SLConfig, self).__setattr__(\n            '_cfg_dict', SLConfig._merge_a_into_b(option_cfg_dict, cfg_dict))\n\n    # for multiprocess\n    def __setstate__(self, state):\n        self.__init__(state)\n\n\n    def copy(self):\n        return SLConfig(self._cfg_dict.copy())\n\n    def deepcopy(self):\n        return SLConfig(self._cfg_dict.deepcopy())\n\n\nclass DictAction(Action):\n    \"\"\"\n    argparse action to split an argument into KEY=VALUE form\n    on the first = and append to a dictionary. List options should\n    be passed as comma separated values, i.e KEY=V1,V2,V3\n    \"\"\"\n\n    @staticmethod\n    def _parse_int_float_bool(val):\n        try:\n            return int(val)\n        except ValueError:\n            pass\n        try:\n            return float(val)\n        except ValueError:\n            pass\n        if val.lower() in ['true', 'false']:\n            return True if val.lower() == 'true' else False\n        if val.lower() in ['none', 'null']:\n            return None\n        return val\n\n    def __call__(self, parser, namespace, values, option_string=None):\n        options = {}\n        for kv in values:\n            key, val = kv.split('=', maxsplit=1)\n            val = [self._parse_int_float_bool(v) for v in val.split(',')]\n            if len(val) == 1:\n                val = val[0]\n            options[key] = val\n        setattr(namespace, self.dest, options)\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/slio.py",
    "content": "# ==========================================================\n# Modified from mmcv\n# ==========================================================\n\nimport json, pickle, yaml\ntry:\n    from yaml import CLoader as Loader, CDumper as Dumper\nexcept ImportError:\n    from yaml import Loader, Dumper\n\nfrom pathlib import Path\nfrom abc import ABCMeta, abstractmethod\n\n# ===========================\n# Rigister handler\n# ===========================\n\nclass BaseFileHandler(metaclass=ABCMeta):\n\n    @abstractmethod\n    def load_from_fileobj(self, file, **kwargs):\n        pass\n\n    @abstractmethod\n    def dump_to_fileobj(self, obj, file, **kwargs):\n        pass\n\n    @abstractmethod\n    def dump_to_str(self, obj, **kwargs):\n        pass\n\n    def load_from_path(self, filepath, mode='r', **kwargs):\n        with open(filepath, mode) as f:\n            return self.load_from_fileobj(f, **kwargs)\n\n    def dump_to_path(self, obj, filepath, mode='w', **kwargs):\n        with open(filepath, mode) as f:\n            self.dump_to_fileobj(obj, f, **kwargs)\n\nclass JsonHandler(BaseFileHandler):\n\n    def load_from_fileobj(self, file):\n        return json.load(file)\n\n    def dump_to_fileobj(self, obj, file, **kwargs):\n        json.dump(obj, file, **kwargs)\n\n    def dump_to_str(self, obj, **kwargs):\n        return json.dumps(obj, **kwargs)\n\nclass PickleHandler(BaseFileHandler):\n\n    def load_from_fileobj(self, file, **kwargs):\n        return pickle.load(file, **kwargs)\n\n    def load_from_path(self, filepath, **kwargs):\n        return super(PickleHandler, self).load_from_path(\n            filepath, mode='rb', **kwargs)\n\n    def dump_to_str(self, obj, **kwargs):\n        kwargs.setdefault('protocol', 2)\n        return pickle.dumps(obj, **kwargs)\n\n    def dump_to_fileobj(self, obj, file, **kwargs):\n        kwargs.setdefault('protocol', 2)\n        pickle.dump(obj, file, **kwargs)\n\n    def dump_to_path(self, obj, filepath, **kwargs):\n        super(PickleHandler, self).dump_to_path(\n            obj, filepath, mode='wb', **kwargs)\n\nclass YamlHandler(BaseFileHandler):\n\n    def load_from_fileobj(self, file, **kwargs):\n        kwargs.setdefault('Loader', Loader)\n        return yaml.load(file, **kwargs)\n\n    def dump_to_fileobj(self, obj, file, **kwargs):\n        kwargs.setdefault('Dumper', Dumper)\n        yaml.dump(obj, file, **kwargs)\n\n    def dump_to_str(self, obj, **kwargs):\n        kwargs.setdefault('Dumper', Dumper)\n        return yaml.dump(obj, **kwargs)\n\nfile_handlers = {\n    'json': JsonHandler(),\n    'yaml': YamlHandler(),\n    'yml': YamlHandler(),\n    'pickle': PickleHandler(),\n    'pkl': PickleHandler()\n}\n\n# ===========================\n# load and dump\n# ===========================\n\ndef is_str(x):\n    \"\"\"Whether the input is an string instance.\n\n    Note: This method is deprecated since python 2 is no longer supported.\n    \"\"\"\n    return isinstance(x, str)\n\ndef slload(file, file_format=None, **kwargs):\n    \"\"\"Load data from json/yaml/pickle files.\n\n    This method provides a unified api for loading data from serialized files.\n\n    Args:\n        file (str or :obj:`Path` or file-like object): Filename or a file-like\n            object.\n        file_format (str, optional): If not specified, the file format will be\n            inferred from the file extension, otherwise use the specified one.\n            Currently supported formats include \"json\", \"yaml/yml\" and\n            \"pickle/pkl\".\n\n    
Returns:\n        The content from the file.\n    \"\"\"\n    if isinstance(file, Path):\n        file = str(file)\n    if file_format is None and is_str(file):\n        file_format = file.split('.')[-1]\n    if file_format not in file_handlers:\n        raise TypeError(f'Unsupported format: {file_format}')\n\n    handler = file_handlers[file_format]\n    if is_str(file):\n        obj = handler.load_from_path(file, **kwargs)\n    elif hasattr(file, 'read'):\n        obj = handler.load_from_fileobj(file, **kwargs)\n    else:\n        raise TypeError('\"file\" must be a filepath str or a file-object')\n    return obj\n\n\ndef sldump(obj, file=None, file_format=None, **kwargs):\n    \"\"\"Dump data to json/yaml/pickle strings or files.\n\n    This method provides a unified api for dumping data as strings or to files,\n    and also supports custom arguments for each file format.\n\n    Args:\n        obj (any): The python object to be dumped.\n        file (str or :obj:`Path` or file-like object, optional): If not\n            specified, then the object is dump to a str, otherwise to a file\n            specified by the filename or file-like object.\n        file_format (str, optional): Same as :func:`load`.\n\n    Returns:\n        bool: True for success, False otherwise.\n    \"\"\"\n    if isinstance(file, Path):\n        file = str(file)\n    if file_format is None:\n        if is_str(file):\n            file_format = file.split('.')[-1]\n        elif file is None:\n            raise ValueError(\n                'file_format must be specified since file is None')\n    if file_format not in file_handlers:\n        raise TypeError(f'Unsupported format: {file_format}')\n\n    handler = file_handlers[file_format]\n    if file is None:\n        return handler.dump_to_str(obj, **kwargs)\n    elif is_str(file):\n        handler.dump_to_path(obj, file, **kwargs)\n    elif hasattr(file, 'write'):\n        handler.dump_to_fileobj(obj, file, **kwargs)\n    else:\n        raise TypeError('\"file\" must be a filename str or a file-object')\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/static_data_path.py",
    "content": "coco = dict(\n    train = dict(\n        img_folder = '/comp_robot/cv_public_dataset/COCO2017/train2017',\n        ann_file = '/comp_robot/cv_public_dataset/COCO2017/annotations/instances_train2017.json'\n    ),\n    val = dict(\n        img_folder = '/comp_robot/cv_public_dataset/COCO2017/val2017',\n        ann_file = '/comp_robot/cv_public_dataset/COCO2017/annotations/instances_val2017.json'\n    )\n)"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/time_counter.py",
    "content": "import json\nimport time\n\nclass TimeCounter:\n    def __init__(self) -> None:\n        pass\n    \n    def clear(self):\n        self.timedict = {}\n        self.basetime = time.perf_counter()\n\n    def timeit(self, name):\n        nowtime = time.perf_counter() - self.basetime\n        self.timedict[name] = nowtime\n        self.basetime = time.perf_counter()\n\n\nclass TimeHolder:\n    def __init__(self) -> None:\n        self.timedict = {}\n\n    def update(self, _timedict:dict):\n        for k,v in _timedict.items():\n            if k not in self.timedict:\n                self.timedict[k] = AverageMeter(name=k, val_only=True)\n            self.timedict[k].update(val=v)\n\n    def final_res(self):\n        return {k:v.avg for k,v in self.timedict.items()}\n        \n    def __str__(self):\n        return json.dumps(self.final_res(), indent=2)\n\n\nclass AverageMeter(object):\n    \"\"\"Computes and stores the average and current value\"\"\"\n    def __init__(self, name, fmt=':f', val_only=False):\n        self.name = name\n        self.fmt = fmt\n        self.val_only = val_only\n        self.reset()\n\n    def reset(self):\n        self.val = 0\n        self.avg = 0\n        self.sum = 0\n        self.count = 0\n\n    def update(self, val, n=1):\n        self.val = val\n        self.sum += val * n\n        self.count += n\n        self.avg = self.sum / self.count\n\n    def __str__(self):\n        if self.val_only:\n            fmtstr = '{name} {val' + self.fmt + '}'\n        else:\n            fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'\n        return fmtstr.format(**self.__dict__)"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/utils.py",
    "content": "from collections import OrderedDict\nfrom copy import deepcopy\nimport json\nimport warnings\n\nimport torch\nimport numpy as np\n\ndef slprint(x, name='x'):\n    if isinstance(x, (torch.Tensor, np.ndarray)):\n        print(f'{name}.shape:', x.shape)\n    elif isinstance(x, (tuple, list)):\n        print('type x:', type(x))\n        for i in range(min(10, len(x))):\n            slprint(x[i], f'{name}[{i}]')\n    elif isinstance(x, dict):\n        for k,v in x.items():\n            slprint(v, f'{name}[{k}]')\n    else:\n        print(f'{name}.type:', type(x))\n\ndef clean_state_dict(state_dict):\n    new_state_dict = OrderedDict()\n    for k, v in state_dict.items():\n        if k[:7] == 'module.':\n            k = k[7:]  # remove `module.`\n        new_state_dict[k] = v\n    return new_state_dict\n\ndef renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) \\\n        -> torch.FloatTensor:\n    # img: tensor(3,H,W) or tensor(B,3,H,W)\n    # return: same as img\n    assert img.dim() == 3 or img.dim() == 4, \"img.dim() should be 3 or 4 but %d\" % img.dim() \n    if img.dim() == 3:\n        assert img.size(0) == 3, 'img.size(0) shoule be 3 but \"%d\". (%s)' % (img.size(0), str(img.size()))\n        img_perm = img.permute(1,2,0)\n        mean = torch.Tensor(mean)\n        std = torch.Tensor(std)\n        img_res = img_perm * std + mean\n        return img_res.permute(2,0,1)\n    else: # img.dim() == 4\n        assert img.size(1) == 3, 'img.size(1) shoule be 3 but \"%d\". (%s)' % (img.size(1), str(img.size()))\n        img_perm = img.permute(0,2,3,1)\n        mean = torch.Tensor(mean)\n        std = torch.Tensor(std)\n        img_res = img_perm * std + mean\n        return img_res.permute(0,3,1,2)\n\n\n\nclass CocoClassMapper():\n    def __init__(self) -> None:\n        self.category_map_str = {\"1\": 1, \"2\": 2, \"3\": 3, \"4\": 4, \"5\": 5, \"6\": 6, \"7\": 7, \"8\": 8, \"9\": 9, \"10\": 10, \"11\": 11, \"13\": 12, \"14\": 13, \"15\": 14, \"16\": 15, \"17\": 16, \"18\": 17, \"19\": 18, \"20\": 19, \"21\": 20, \"22\": 21, \"23\": 22, \"24\": 23, \"25\": 24, \"27\": 25, \"28\": 26, \"31\": 27, \"32\": 28, \"33\": 29, \"34\": 30, \"35\": 31, \"36\": 32, \"37\": 33, \"38\": 34, \"39\": 35, \"40\": 36, \"41\": 37, \"42\": 38, \"43\": 39, \"44\": 40, \"46\": 41, \"47\": 42, \"48\": 43, \"49\": 44, \"50\": 45, \"51\": 46, \"52\": 47, \"53\": 48, \"54\": 49, \"55\": 50, \"56\": 51, \"57\": 52, \"58\": 53, \"59\": 54, \"60\": 55, \"61\": 56, \"62\": 57, \"63\": 58, \"64\": 59, \"65\": 60, \"67\": 61, \"70\": 62, \"72\": 63, \"73\": 64, \"74\": 65, \"75\": 66, \"76\": 67, \"77\": 68, \"78\": 69, \"79\": 70, \"80\": 71, \"81\": 72, \"82\": 73, \"84\": 74, \"85\": 75, \"86\": 76, \"87\": 77, \"88\": 78, \"89\": 79, \"90\": 80}\n        self.origin2compact_mapper = {int(k):v-1 for k,v in self.category_map_str.items()}\n        self.compact2origin_mapper = {int(v-1):int(k) for k,v in self.category_map_str.items()}\n\n    def origin2compact(self, idx):\n        return self.origin2compact_mapper[int(idx)]\n\n    def compact2origin(self, idx):\n        return self.compact2origin_mapper[int(idx)]\n\ndef to_device(item, device):\n    if isinstance(item, torch.Tensor):\n        return item.to(device)\n    elif isinstance(item, list):\n        return [to_device(i, device) for i in item]\n    elif isinstance(item, dict):\n        return {k: to_device(v, device) for k,v in item.items()}\n    else:\n        raise NotImplementedError(\"Call Shilong if you use other 
containers! type: {}\".format(type(item)))\n\n\n\n# \ndef get_gaussian_mean(x, axis, other_axis, softmax=True):\n    \"\"\"\n\n    Args:\n        x (float): Input images(BxCxHxW)\n        axis (int): The index for weighted mean\n        other_axis (int): The other index\n\n    Returns: weighted index for axis, BxC\n\n    \"\"\"\n    mat2line = torch.sum(x, axis=other_axis)\n    # mat2line = mat2line / mat2line.mean() * 10\n    if softmax:\n        u = torch.softmax(mat2line, axis=2)\n    else:\n        u = mat2line / (mat2line.sum(2, keepdim=True) + 1e-6)\n    size = x.shape[axis]\n    ind = torch.linspace(0, 1, size).to(x.device)\n    batch = x.shape[0]\n    channel = x.shape[1]\n    index = ind.repeat([batch, channel, 1])\n    mean_position = torch.sum(index * u, dim=2)\n    return mean_position\n\ndef get_expected_points_from_map(hm, softmax=True):\n    \"\"\"get_gaussian_map_from_points\n        B,C,H,W -> B,N,2 float(0, 1) float(0, 1)\n        softargmax function\n\n    Args:\n        hm (float): Input images(BxCxHxW)\n\n    Returns: \n        weighted index for axis, BxCx2. float between 0 and 1.\n\n    \"\"\"\n    # hm = 10*hm\n    B,C,H,W = hm.shape\n    y_mean = get_gaussian_mean(hm, 2, 3, softmax=softmax) # B,C\n    x_mean = get_gaussian_mean(hm, 3, 2, softmax=softmax) # B,C\n    # return torch.cat((x_mean.unsqueeze(-1), y_mean.unsqueeze(-1)), 2)\n    return torch.stack([x_mean, y_mean], dim=2)\n\n# Positional encoding (section 5.1)\n# borrow from nerf\nclass Embedder:\n    def __init__(self, **kwargs):\n        self.kwargs = kwargs\n        self.create_embedding_fn()\n        \n    def create_embedding_fn(self):\n        embed_fns = []\n        d = self.kwargs['input_dims']\n        out_dim = 0\n        if self.kwargs['include_input']:\n            embed_fns.append(lambda x : x)\n            out_dim += d\n            \n        max_freq = self.kwargs['max_freq_log2']\n        N_freqs = self.kwargs['num_freqs']\n        \n        if self.kwargs['log_sampling']:\n            freq_bands = 2.**torch.linspace(0., max_freq, steps=N_freqs)\n        else:\n            freq_bands = torch.linspace(2.**0., 2.**max_freq, steps=N_freqs)\n            \n        for freq in freq_bands:\n            for p_fn in self.kwargs['periodic_fns']:\n                embed_fns.append(lambda x, p_fn=p_fn, freq=freq : p_fn(x * freq))\n                out_dim += d\n                    \n        self.embed_fns = embed_fns\n        self.out_dim = out_dim\n        \n    def embed(self, inputs):\n        return torch.cat([fn(inputs) for fn in self.embed_fns], -1)\n\n\ndef get_embedder(multires, i=0):\n    import torch.nn as nn\n    if i == -1:\n        return nn.Identity(), 3\n    \n    embed_kwargs = {\n                'include_input' : True,\n                'input_dims' : 3,\n                'max_freq_log2' : multires-1,\n                'num_freqs' : multires,\n                'log_sampling' : True,\n                'periodic_fns' : [torch.sin, torch.cos],\n    }\n    \n    embedder_obj = Embedder(**embed_kwargs)\n    embed = lambda x, eo=embedder_obj : eo.embed(x)\n    return embed, embedder_obj.out_dim\n\nclass APOPMeter():\n    def __init__(self) -> None:\n        self.tp = 0\n        self.fp = 0\n        self.tn = 0\n        self.fn = 0\n\n    def update(self, pred, gt):\n        \"\"\"\n        Input:\n            pred, gt: Tensor()\n        \"\"\"\n        assert pred.shape == gt.shape\n        self.tp += torch.logical_and(pred == 1, gt == 1).sum().item()\n        self.fp += torch.logical_and(pred == 1, 
gt == 0).sum().item()\n        self.tn += torch.logical_and(pred == 0, gt == 0).sum().item()\n        self.fn += torch.logical_and(pred == 0, gt == 1).sum().item()\n\n    def update_cm(self, tp, fp, tn, fn):\n        self.tp += tp\n        self.fp += fp\n        self.tn += tn\n        self.fn += fn\n\ndef inverse_sigmoid(x, eps=1e-5):\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1/x2)\n\nimport argparse\nfrom util.slconfig import SLConfig\ndef get_raw_dict(args):\n    \"\"\"\n    return the dict contained in args.\n    \n    e.g:\n        >>> with open(path, 'w') as f:\n                json.dump(get_raw_dict(args), f, indent=2)\n    \"\"\"\n    if isinstance(args, argparse.Namespace): \n        return vars(args)   \n    elif isinstance(args, dict):\n        return args\n    elif isinstance(args, SLConfig):\n        return args._cfg_dict\n    else:\n        raise NotImplementedError(\"Unknown type {}\".format(type(args)))\n\n\ndef stat_tensors(tensor):\n    assert tensor.dim() == 1\n    tensor_sm = tensor.softmax(0)\n    entropy = (tensor_sm * torch.log(tensor_sm + 1e-9)).sum()\n\n    return {\n        'max': tensor.max(),\n        'min': tensor.min(),\n        'mean': tensor.mean(),\n        'var': tensor.var(),\n        'std': tensor.var() ** 0.5,\n        'entropy': entropy\n    }\n\n\nclass NiceRepr:\n    \"\"\"Inherit from this class and define ``__nice__`` to \"nicely\" print your\n    objects.\n\n    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function\n    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.\n    If the inheriting class has a ``__len__`` method, then the default\n    ``__nice__`` method will return its length.\n\n    Example:\n        >>> class Foo(NiceRepr):\n        ...    def __nice__(self):\n        ...        return 'info'\n        >>> foo = Foo()\n        >>> assert str(foo) == '<Foo(info)>'\n        >>> assert repr(foo).startswith('<Foo(info) at ')\n\n    Example:\n        >>> class Bar(NiceRepr):\n        ...    pass\n        >>> bar = Bar()\n        >>> import pytest\n        >>> with pytest.warns(None) as record:\n        >>>     assert 'object at' in str(bar)\n        >>>     assert 'object at' in repr(bar)\n\n    Example:\n        >>> class Baz(NiceRepr):\n        ...    def __len__(self):\n        ...        
return 5\n        >>> baz = Baz()\n        >>> assert str(baz) == '<Baz(5)>'\n    \"\"\"\n\n    def __nice__(self):\n        \"\"\"str: a \"nice\" summary string describing this module\"\"\"\n        if hasattr(self, '__len__'):\n            # It is a common pattern for objects to use __len__ in __nice__\n            # As a convenience we define a default __nice__ for these objects\n            return str(len(self))\n        else:\n            # In all other cases force the subclass to overload __nice__\n            raise NotImplementedError(\n                f'Define the __nice__ method for {self.__class__!r}')\n\n    def __repr__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            nice = self.__nice__()\n            classname = self.__class__.__name__\n            return f'<{classname}({nice}) at {hex(id(self))}>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n\n    def __str__(self):\n        \"\"\"str: the string of the module\"\"\"\n        try:\n            classname = self.__class__.__name__\n            nice = self.__nice__()\n            return f'<{classname}({nice})>'\n        except NotImplementedError as ex:\n            warnings.warn(str(ex), category=RuntimeWarning)\n            return object.__repr__(self)\n\n\n\ndef ensure_rng(rng=None):\n    \"\"\"Coerces input into a random number generator.\n\n    If the input is None, then a global random state is returned.\n\n    If the input is a numeric value, then that is used as a seed to construct a\n    random state. Otherwise the input is returned as-is.\n\n    Adapted from [1]_.\n\n    Args:\n        rng (int | numpy.random.RandomState | None):\n            if None, then defaults to the global rng. Otherwise this can be an\n            integer or a RandomState class\n    Returns:\n        (numpy.random.RandomState) : rng -\n            a numpy random number generator\n\n    References:\n        .. 
[1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270  # noqa: E501\n    \"\"\"\n\n    if rng is None:\n        rng = np.random.mtrand._rand\n    elif isinstance(rng, int):\n        rng = np.random.RandomState(rng)\n    else:\n        rng = rng\n    return rng\n\ndef random_boxes(num=1, scale=1, rng=None):\n    \"\"\"Simple version of ``kwimage.Boxes.random``\n\n    Returns:\n        Tensor: shape (n, 4) in x1, y1, x2, y2 format.\n\n    References:\n        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390\n\n    Example:\n        >>> num = 3\n        >>> scale = 512\n        >>> rng = 0\n        >>> boxes = random_boxes(num, scale, rng)\n        >>> print(boxes)\n        tensor([[280.9925, 278.9802, 308.6148, 366.1769],\n                [216.9113, 330.6978, 224.0446, 456.5878],\n                [405.3632, 196.3221, 493.3953, 270.7942]])\n    \"\"\"\n    rng = ensure_rng(rng)\n\n    tlbr = rng.rand(num, 4).astype(np.float32)\n\n    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])\n    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])\n    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])\n    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])\n\n    tlbr[:, 0] = tl_x * scale\n    tlbr[:, 1] = tl_y * scale\n    tlbr[:, 2] = br_x * scale\n    tlbr[:, 3] = br_y * scale\n\n    boxes = torch.from_numpy(tlbr)\n    return boxes\n\n\nclass ModelEma(torch.nn.Module):\n    def __init__(self, model, decay=0.9997, device=None):\n        super(ModelEma, self).__init__()\n        # make a copy of the model for accumulating moving average of weights\n        self.module = deepcopy(model)\n        self.module.eval()\n\n        # import ipdb; ipdb.set_trace()\n\n        self.decay = decay\n        self.device = device  # perform ema on different device from model if set\n        if self.device is not None:\n            self.module.to(device=device)\n\n    def _update(self, model, update_fn):\n        with torch.no_grad():\n            for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):\n                if self.device is not None:\n                    model_v = model_v.to(device=self.device)\n                ema_v.copy_(update_fn(ema_v, model_v))\n\n    def update(self, model):\n        self._update(model, update_fn=lambda e, m: self.decay * e + (1. 
- self.decay) * m)\n\n    def set(self, model):\n        self._update(model, update_fn=lambda e, m: m)\n\nclass BestMetricSingle():\n    def __init__(self, init_res=0.0, better='large') -> None:\n        self.init_res = init_res\n        self.best_res = init_res\n        self.best_ep = -1\n\n        self.better = better\n        assert better in ['large', 'small']\n\n    def isbetter(self, new_res, old_res):\n        if self.better == 'large':\n            return new_res > old_res\n        if self.better == 'small':\n            return new_res < old_res\n\n    def update(self, new_res, ep):\n        if self.isbetter(new_res, self.best_res):\n            self.best_res = new_res\n            self.best_ep = ep\n            return True\n        return False\n\n    def __str__(self) -> str:\n        return \"best_res: {}\\t best_ep: {}\".format(self.best_res, self.best_ep)\n\n    def __repr__(self) -> str:\n        return self.__str__()\n\n    def summary(self) -> dict:\n        return {\n            'best_res': self.best_res,\n            'best_ep': self.best_ep,\n        }\n\n\nclass BestMetricHolder():\n    def __init__(self, init_res=0.0, better='large', use_ema=False) -> None:\n        self.best_all = BestMetricSingle(init_res, better)\n        self.use_ema = use_ema\n        if use_ema:\n            self.best_ema = BestMetricSingle(init_res, better)\n            self.best_regular = BestMetricSingle(init_res, better)\n    \n\n    def update(self, new_res, epoch, is_ema=False):\n        \"\"\"\n        return if the results is the best.\n        \"\"\"\n        if not self.use_ema:\n            return self.best_all.update(new_res, epoch)\n        else:\n            if is_ema:\n                self.best_ema.update(new_res, epoch)\n                return self.best_all.update(new_res, epoch)\n            else:\n                self.best_regular.update(new_res, epoch)\n                return self.best_all.update(new_res, epoch)\n\n    def summary(self):\n        if not self.use_ema:\n            return self.best_all.summary()\n\n        res = {}\n        res.update({f'all_{k}':v for k,v in self.best_all.summary().items()})\n        res.update({f'regular_{k}':v for k,v in self.best_regular.summary().items()})\n        res.update({f'ema_{k}':v for k,v in self.best_ema.summary().items()})\n        return res\n\n    def __repr__(self) -> str:\n        return json.dumps(self.summary(), indent=2)\n\n    def __str__(self) -> str:\n        return self.__repr__()\n            "
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/vis_utils.py",
    "content": "import cv2\nimport numpy as np\n\nfrom util.utils import renorm\nfrom util.misc import color_sys\n\n_color_getter = color_sys(100)\n\n# plot known and unknown box\ndef add_box_to_img(img, boxes, colorlist, brands=None):\n    \"\"\"[summary]\n\n    Args:\n        img ([type]): np.array, H,W,3\n        boxes ([type]): list of list(4)\n        colorlist: list of colors.\n        brands: text.\n\n    Return:\n        img: np.array. H,W,3.\n    \"\"\"\n    H, W = img.shape[:2]\n    for _i, (box, color) in enumerate(zip(boxes, colorlist)):\n        x, y, w, h = box[0] * W, box[1] * H, box[2] * W, box[3] * H\n        img = cv2.rectangle(img.copy(), (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), color, 2)\n        if brands is not None:\n            brand = brands[_i]\n            org = (int(x-w/2), int(y+h/2))\n            font = cv2.FONT_HERSHEY_SIMPLEX\n            fontScale = 0.5\n            thickness = 1\n            img = cv2.putText(img.copy(), str(brand), org, font, \n                fontScale, color, thickness, cv2.LINE_AA)\n    return img\n\ndef plot_dual_img(img, boxes, labels, idxs, probs=None):\n    \"\"\"[summary]\n\n    Args:\n        img ([type]): 3,H,W. tensor.\n        boxes (): tensor(Kx4) or list of tensor(1x4).\n        labels ([type]): list of ints.\n        idxs ([type]): list of ints.\n        probs (optional): listof floats.\n\n    Returns:\n        img_classcolor: np.array. H,W,3. img with class-wise label.\n        img_seqcolor: np.array. H,W,3. img with seq-wise label.\n    \"\"\"\n    # import ipdb; ipdb.set_trace()\n    boxes = [i.cpu().tolist() for i in boxes]\n    img = (renorm(img.cpu()).permute(1,2,0).numpy() * 255).astype(np.uint8)\n    # plot with class\n    class_colors = [_color_getter(i) for i in labels]\n    if probs is not None:\n        brands = [\"{},{:.2f}\".format(j,k) for j,k in zip(labels, probs)]\n    else:\n        brands = labels\n    img_classcolor = add_box_to_img(img, boxes, class_colors, brands=brands)\n    # plot with seq\n    seq_colors = [_color_getter((i * 11) % 100) for i in idxs]\n    img_seqcolor = add_box_to_img(img, boxes, seq_colors, brands=idxs)\n    return img_classcolor, img_seqcolor\n\n\ndef plot_raw_img(img, boxes, labels):\n    \"\"\"[summary]\n\n    Args:\n        img ([type]): 3,H,W. tensor. \n        boxes ([type]): Kx4. tensor\n        labels ([type]): K. tensor.\n\n    return:\n        img: np.array. H,W,3. img with bbox annos.\n    \n    \"\"\"\n    img = (renorm(img.cpu()).permute(1,2,0).numpy() * 255).astype(np.uint8)\n    H, W = img.shape[:2]\n    for box, label in zip(boxes.tolist(), labels.tolist()):\n        x, y, w, h = box[0] * W, box[1] * H, box[2] * W, box[3] * H\n        # import ipdb; ipdb.set_trace()\n        img = cv2.rectangle(img.copy(), (int(x-w/2), int(y-h/2)), (int(x+w/2), int(y+h/2)), _color_getter(label), 2)\n        # add text\n        org = (int(x-w/2), int(y+h/2))\n        font = cv2.FONT_HERSHEY_SIMPLEX\n        fontScale = 1\n        thickness = 1\n        img = cv2.putText(img.copy(), str(label), org, font, \n            fontScale, _color_getter(label), thickness, cv2.LINE_AA)\n\n    return img"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/util/visualizer.py",
    "content": "# -*- coding: utf-8 -*-\n'''\n@File    :   visualizer.py\n@Time    :   2022/04/05 11:39:33\n@Author  :   Shilong Liu \n@Contact :   liusl20@mail.tsinghua.edu.cn; slongliu86@gmail.com\nModified from COCO evaluator\n'''\n\nimport os, sys\nfrom textwrap import wrap\nimport torch\nimport numpy as np\nimport cv2\nimport datetime\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.collections import PatchCollection\nfrom matplotlib.patches import Polygon\nfrom pycocotools import mask as maskUtils\nfrom matplotlib import transforms\n\ndef renorm(img: torch.FloatTensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) \\\n        -> torch.FloatTensor:\n    # img: tensor(3,H,W) or tensor(B,3,H,W)\n    # return: same as img\n    assert img.dim() == 3 or img.dim() == 4, \"img.dim() should be 3 or 4 but %d\" % img.dim() \n    if img.dim() == 3:\n        assert img.size(0) == 3, 'img.size(0) shoule be 3 but \"%d\". (%s)' % (img.size(0), str(img.size()))\n        img_perm = img.permute(1,2,0)\n        mean = torch.Tensor(mean)\n        std = torch.Tensor(std)\n        img_res = img_perm * std + mean\n        return img_res.permute(2,0,1)\n    else: # img.dim() == 4\n        assert img.size(1) == 3, 'img.size(1) shoule be 3 but \"%d\". (%s)' % (img.size(1), str(img.size()))\n        img_perm = img.permute(0,2,3,1)\n        mean = torch.Tensor(mean)\n        std = torch.Tensor(std)\n        img_res = img_perm * std + mean\n        return img_res.permute(0,3,1,2)\n\nclass ColorMap():\n    def __init__(self, basergb=[255,255,0]):\n        self.basergb = np.array(basergb)\n    def __call__(self, attnmap):\n        # attnmap: h, w. np.uint8.\n        # return: h, w, 4. np.uint8.\n        assert attnmap.dtype == np.uint8\n        h, w = attnmap.shape\n        res = self.basergb.copy()\n        res = res[None][None].repeat(h, 0).repeat(w, 1) # h, w, 3\n        attn1 = attnmap.copy()[..., None] # h, w, 1\n        res = np.concatenate((res, attn1), axis=-1).astype(np.uint8)\n        return res\n\n\nclass COCOVisualizer():\n    def __init__(self) -> None:\n        pass\n\n    def visualize(self, img, tgt, caption=None, dpi=120, savedir=None, show_in_console=True):\n        \"\"\"\n        img: tensor(3, H, W)\n        tgt: make sure they are all on cpu.\n            must have items: 'image_id', 'boxes', 'size'\n        \"\"\"\n        plt.figure(dpi=dpi)\n        plt.rcParams['font.size'] = '5'\n        ax = plt.gca()\n        img = renorm(img).permute(1, 2, 0)\n        # if os.environ.get('IPDB_SHILONG_DEBUG', None) == 'INFO':\n        #     import ipdb; ipdb.set_trace()\n        ax.imshow(img)\n        \n        self.addtgt(tgt)\n        if show_in_console:\n            plt.show()\n\n        if savedir is not None:\n            if caption is None:\n                savename = '{}/{}-{}.png'.format(savedir, int(tgt['image_id']), str(datetime.datetime.now()).replace(' ', '-'))\n            else:\n                savename = '{}/{}-{}-{}.png'.format(savedir, caption, int(tgt['image_id']), str(datetime.datetime.now()).replace(' ', '-'))\n            print(\"savename: {}\".format(savename))\n            os.makedirs(os.path.dirname(savename), exist_ok=True)\n            plt.savefig(savename)\n        plt.close()\n\n    def addtgt(self, tgt):\n        \"\"\"\n        - tgt: dict. args:\n            - boxes: num_boxes, 4. 
xywh, [0,1].\n            - box_label: num_boxes.\n        \"\"\"\n        assert 'boxes' in tgt\n        ax = plt.gca()\n        H, W = tgt['size'].tolist() \n        numbox = tgt['boxes'].shape[0]\n\n        color = []\n        polygons = []\n        boxes = []\n        for box in tgt['boxes'].cpu():\n            unnormbbox = box * torch.Tensor([W, H, W, H])\n            unnormbbox[:2] -= unnormbbox[2:] / 2\n            [bbox_x, bbox_y, bbox_w, bbox_h] = unnormbbox.tolist()\n            boxes.append([bbox_x, bbox_y, bbox_w, bbox_h])\n            poly = [[bbox_x, bbox_y], [bbox_x, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y+bbox_h], [bbox_x+bbox_w, bbox_y]]\n            np_poly = np.array(poly).reshape((4,2))\n            polygons.append(Polygon(np_poly))\n            c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]\n            color.append(c)\n\n        p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.1)\n        ax.add_collection(p)\n        p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)\n        ax.add_collection(p)\n\n\n        if 'box_label' in tgt:\n            assert len(tgt['box_label']) == numbox, f\"{len(tgt['box_label'])} = {numbox}, \"\n            for idx, bl in enumerate(tgt['box_label']):\n                _string = str(bl)\n                bbox_x, bbox_y, bbox_w, bbox_h = boxes[idx]\n                # ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': 'yellow', 'alpha': 1.0, 'pad': 1})\n                ax.text(bbox_x, bbox_y, _string, color='black', bbox={'facecolor': color[idx], 'alpha': 0.6, 'pad': 1})\n\n        if 'caption' in tgt:\n            ax.set_title(tgt['caption'], wrap=True)\n\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/focalnet_dino/models/dino/utils.py",
    "content": "# ------------------------------------------------------------------------\n# DINO\n# Copyright (c) 2022 IDEA. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n\nimport torch\nimport random\nfrom torch import nn, Tensor\nimport os\n\nimport math\nimport torch.nn.functional as F\nfrom torch import nn\n\n\n\n\ndef gen_encoder_output_proposals(memory:Tensor, memory_padding_mask:Tensor, spatial_shapes:Tensor, learnedwh=None):\n    \"\"\"\n    Input:\n        - memory: bs, \\sum{hw}, d_model\n        - memory_padding_mask: bs, \\sum{hw}\n        - spatial_shapes: nlevel, 2\n        - learnedwh: 2\n    Output:\n        - output_memory: bs, \\sum{hw}, d_model\n        - output_proposals: bs, \\sum{hw}, 4\n    \"\"\"\n    N_, S_, C_ = memory.shape\n    base_scale = 4.0\n    proposals = []\n    _cur = 0\n    for lvl, (H_, W_) in enumerate(spatial_shapes):\n        mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H_ * W_)].view(N_, H_, W_, 1)\n        valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n        valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n        # import ipdb; ipdb.set_trace()\n\n        grid_y, grid_x = torch.meshgrid(torch.linspace(0, H_ - 1, H_, dtype=torch.float32, device=memory.device),\n                                        torch.linspace(0, W_ - 1, W_, dtype=torch.float32, device=memory.device))\n        grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) # H_, W_, 2\n\n        scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N_, 1, 1, 2)\n        grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n\n        if learnedwh is not None:\n            # import ipdb; ipdb.set_trace()\n            wh = torch.ones_like(grid) * learnedwh.sigmoid() * (2.0 ** lvl)\n        else:\n            wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n\n        # scale = torch.cat([W_[None].unsqueeze(-1), H_[None].unsqueeze(-1)], 1).view(1, 1, 1, 2).repeat(N_, 1, 1, 1)\n        # grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n        # wh = torch.ones_like(grid) / scale\n        proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n        proposals.append(proposal)\n        _cur += (H_ * W_)\n    # import ipdb; ipdb.set_trace()\n    output_proposals = torch.cat(proposals, 1)\n    output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)\n    output_proposals = torch.log(output_proposals / (1 - output_proposals)) # unsigmoid\n    output_proposals = output_proposals.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n    output_proposals = output_proposals.masked_fill(~output_proposals_valid, float('inf'))\n\n    output_memory = memory\n    output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n    output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n\n    # output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float('inf'))\n    # output_memory = output_memory.masked_fill(~output_proposals_valid, float('inf'))\n\n    return output_memory, output_proposals\n\n\nclass RandomBoxPerturber():\n    def __init__(self, x_noise_scale=0.2, y_noise_scale=0.2, w_noise_scale=0.2, h_noise_scale=0.2) -> None:\n        self.noise_scale = torch.Tensor([x_noise_scale, y_noise_scale, w_noise_scale, h_noise_scale])\n\n    def __call__(self, refanchors: Tensor) 
-> Tensor:\n        nq, bs, query_dim = refanchors.shape\n        device = refanchors.device\n\n        noise_raw = torch.rand_like(refanchors)\n        noise_scale = self.noise_scale.to(device)[:query_dim]\n\n        new_refanchors = refanchors * (1 + (noise_raw - 0.5) * noise_scale)\n        return new_refanchors.clamp_(0, 1)\n        \ndef sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2):\n    \"\"\"\n    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n        alpha: (optional) Weighting factor in range (0,1) to balance\n                positive vs negative examples. Default = -1 (no weighting).\n        gamma: Exponent of the modulating factor (1 - p_t) to\n               balance easy vs hard examples.\n    Returns:\n        Loss tensor\n    \"\"\"\n    prob = inputs.sigmoid()\n    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n    p_t = prob * targets + (1 - prob) * (1 - targets)\n    loss = ce_loss * ((1 - p_t) ** gamma)\n\n    if alpha >= 0:\n        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n        loss = alpha_t * loss\n\n\n    return loss.mean(1).sum() / num_boxes\n\nclass MLP(nn.Module):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))\n\n    def forward(self, x):\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\ndef _get_activation_fn(activation, d_model=256, batch_dim=0):\n    \"\"\"Return an activation function given a string\"\"\"\n    if activation == \"relu\":\n        return F.relu\n    if activation == \"gelu\":\n        return F.gelu\n    if activation == \"glu\":\n        return F.glu\n    if activation == \"prelu\":\n        return nn.PReLU()\n    if activation == \"selu\":\n        return F.selu\n\n    raise RuntimeError(F\"activation should be relu/gelu, not {activation}.\")\n\n\n\n\ndef gen_sineembed_for_position(pos_tensor):\n    # n_query, bs, _ = pos_tensor.size()\n    # sineembed_tensor = torch.zeros(n_query, bs, 256)\n    scale = 2 * math.pi\n    dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device)\n    dim_t = 10000 ** (2 * (dim_t // 2) / 128)\n    x_embed = pos_tensor[:, :, 0] * scale\n    y_embed = pos_tensor[:, :, 1] * scale\n    pos_x = x_embed[:, :, None] / dim_t\n    pos_y = y_embed[:, :, None] / dim_t\n    pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2)\n    pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2)\n    if pos_tensor.size(-1) == 2:\n        pos = torch.cat((pos_y, pos_x), dim=2)\n    elif pos_tensor.size(-1) == 4:\n        w_embed = pos_tensor[:, :, 2] * scale\n        pos_w = w_embed[:, :, None] / dim_t\n        pos_w = torch.stack((pos_w[:, :, 0::2].sin(), pos_w[:, :, 1::2].cos()), 
dim=3).flatten(2)\n\n        h_embed = pos_tensor[:, :, 3] * scale\n        pos_h = h_embed[:, :, None] / dim_t\n        pos_h = torch.stack((pos_h[:, :, 0::2].sin(), pos_h[:, :, 1::2].cos()), dim=3).flatten(2)\n\n        pos = torch.cat((pos_y, pos_x, pos_w, pos_h), dim=2)\n    else:\n        raise ValueError(\"Unknown pos_tensor shape(-1):{}\".format(pos_tensor.size(-1)))\n    return pos"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/hdetr_wrapper.py",
    "content": "import torch\nimport torch.nn.functional as F\nfrom mmcv.runner import BaseModule\n\nfrom .models import build_model\nfrom .models.util.misc import NestedTensor, inverse_sigmoid\n\n\nclass HDetrWrapper(BaseModule):\n    def __init__(self,\n                 args=None,\n                 init_cfg=None):\n        super(HDetrWrapper, self).__init__(init_cfg)\n        model, box_postprocessor = build_model(args)\n        self.model = model\n        self.box_postprocessor = box_postprocessor\n\n        self.model.num_queries = self.model.num_queries_one2one\n        self.model.transformer.two_stage_num_proposals = self.model.num_queries\n        self.cls_index = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28,\n                          31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54,\n                          55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81,\n                          82, 84, 85, 86, 87, 88, 89, 90]\n\n    def forward(self,\n                img,\n                img_metas):\n        \"\"\"Forward function for training mode.\n        Args:\n            img (Tensor): of shape (N, C, H, W) encoding input images.\n                Typically these should be mean centered and std scaled.\n            img_metas (list[dict]): Meta information of each image, e.g.,\n                image size, scaling factor, etc.\n        \"\"\"\n        input_img_h, input_img_w = img_metas[0][\"batch_input_shape\"]\n        batch_size = img.size(0)\n        img_masks = img.new_ones((batch_size, input_img_h, input_img_w),\n                                 dtype=torch.bool)\n        for img_id in range(batch_size):\n            img_h, img_w, _ = img_metas[img_id][\"img_shape\"]\n            img_masks[img_id, :img_h, :img_w] = False\n        samples = NestedTensor(tensors=img, mask=img_masks)\n        features, pos = self.model.backbone(samples)\n\n        srcs = []\n        masks = []\n        for l, feat in enumerate(features):\n            src, mask = feat.decompose()\n            srcs.append(self.model.input_proj[l](src))\n            masks.append(mask)\n            assert mask is not None\n        if self.model.num_feature_levels > len(srcs):\n            _len_srcs = len(srcs)\n            for l in range(_len_srcs, self.model.num_feature_levels):\n                if l == _len_srcs:\n                    src = self.model.input_proj[l](features[-1].tensors)\n                else:\n                    src = self.model.input_proj[l](srcs[-1])\n                m = samples.mask\n                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(\n                    torch.bool\n                )[0]\n                pos_l = self.model.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n                srcs.append(src)\n                masks.append(mask)\n                pos.append(pos_l)\n\n        query_embeds = None\n        if not self.model.two_stage or self.model.mixed_selection:\n            query_embeds = self.model.query_embed.weight[0: self.model.num_queries, :]\n\n        # make attn mask\n        \"\"\" attention mask to prevent information leakage\n        \"\"\"\n        self_attn_mask = (\n            torch.zeros([self.model.num_queries, self.model.num_queries, ]).bool().to(src.device)\n        )\n        self_attn_mask[self.model.num_queries_one2one:, 0: self.model.num_queries_one2one, ] = True\n        self_attn_mask[0: 
self.model.num_queries_one2one, self.model.num_queries_one2one:, ] = True\n\n        (\n            hs,\n            init_reference,\n            inter_references,\n            enc_outputs_class,\n            enc_outputs_coord_unact,\n        ) = self.model.transformer(srcs, masks, pos, query_embeds, self_attn_mask)\n\n        outputs_classes_one2one = []\n        outputs_coords_one2one = []\n        outputs_classes_one2many = []\n        outputs_coords_one2many = []\n        for lvl in range(hs.shape[0]):\n            if lvl == 0:\n                reference = init_reference\n            else:\n                reference = inter_references[lvl - 1]\n            reference = inverse_sigmoid(reference)\n            outputs_class = self.model.class_embed[lvl](hs[lvl])\n            tmp = self.model.bbox_embed[lvl](hs[lvl])\n            if reference.shape[-1] == 4:\n                tmp += reference\n            else:\n                assert reference.shape[-1] == 2\n                tmp[..., :2] += reference\n            outputs_coord = tmp.sigmoid()\n\n            outputs_classes_one2one.append(\n                outputs_class[:, 0: self.model.num_queries_one2one]\n            )\n            outputs_classes_one2many.append(\n                outputs_class[:, self.model.num_queries_one2one:]\n            )\n            outputs_coords_one2one.append(\n                outputs_coord[:, 0: self.model.num_queries_one2one]\n            )\n            outputs_coords_one2many.append(outputs_coord[:, self.model.num_queries_one2one:])\n        outputs_classes_one2one = torch.stack(outputs_classes_one2one)\n        outputs_coords_one2one = torch.stack(outputs_coords_one2one)\n\n        sampled_logits = outputs_classes_one2one[-1][:, :, self.cls_index]\n        out = {\n            \"pred_logits\": sampled_logits,\n            \"pred_boxes\": outputs_coords_one2one[-1],\n        }\n        return out\n\n    def simple_test(self, img, img_metas, rescale=False):\n        # out: dict\n        out = self(img, img_metas)\n        if rescale:\n            ori_target_sizes = [meta_info['ori_shape'][:2] for meta_info in img_metas]\n        else:\n            ori_target_sizes = [meta_info['img_shape'][:2] for meta_info in img_metas]\n        ori_target_sizes = out['pred_logits'].new_tensor(ori_target_sizes, dtype=torch.int64)\n        # results: List[dict(scores, labels, boxes)]\n        results = self.box_postprocessor(out, ori_target_sizes)\n        return results\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/__init__.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\nfrom .deformable_detr import build\n\n\ndef build_model(args):\n    return build(args)\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/backbone.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nBackbone modules.\n\"\"\"\nfrom collections import OrderedDict\n\nimport torch\nimport torch.nn.functional as F\nimport torchvision\nfrom torch import nn\nfrom torchvision.models._utils import IntermediateLayerGetter\nfrom typing import Dict, List\n\nfrom .util.misc import NestedTensor, is_main_process\n\nfrom .position_encoding import build_position_encoding\nfrom .swin_transformer import SwinTransformer\n\n\nclass FrozenBatchNorm2d(torch.nn.Module):\n    \"\"\"\n    BatchNorm2d where the batch statistics and the affine parameters are fixed.\n\n    Copy-paste from torchvision.misc.ops with added eps before rqsrt,\n    without which any other models than torchvision.models.resnet[18,34,50,101]\n    produce nans.\n    \"\"\"\n\n    def __init__(self, n, eps=1e-5):\n        super(FrozenBatchNorm2d, self).__init__()\n        self.register_buffer(\"weight\", torch.ones(n))\n        self.register_buffer(\"bias\", torch.zeros(n))\n        self.register_buffer(\"running_mean\", torch.zeros(n))\n        self.register_buffer(\"running_var\", torch.ones(n))\n        self.eps = eps\n\n    def _load_from_state_dict(\n        self,\n        state_dict,\n        prefix,\n        local_metadata,\n        strict,\n        missing_keys,\n        unexpected_keys,\n        error_msgs,\n    ):\n        num_batches_tracked_key = prefix + \"num_batches_tracked\"\n        if num_batches_tracked_key in state_dict:\n            del state_dict[num_batches_tracked_key]\n\n        super(FrozenBatchNorm2d, self)._load_from_state_dict(\n            state_dict,\n            prefix,\n            local_metadata,\n            strict,\n            missing_keys,\n            unexpected_keys,\n            error_msgs,\n        )\n\n    def forward(self, x):\n        # move reshapes to the beginning\n        # to make it fuser-friendly\n        w = self.weight.reshape(1, -1, 1, 1)\n        b = self.bias.reshape(1, -1, 1, 1)\n        rv = self.running_var.reshape(1, -1, 1, 1)\n        rm = self.running_mean.reshape(1, -1, 1, 1)\n        eps = self.eps\n        scale = w * (rv + eps).rsqrt()\n        bias = b - rm * scale\n        return x * scale + bias\n\n\nclass BackboneBase(nn.Module):\n    def __init__(\n        self, backbone: nn.Module, train_backbone: bool, return_interm_layers: bool\n    ):\n        super().__init__()\n        for name, parameter in backbone.named_parameters():\n            if (\n                not train_backbone\n                or \"layer2\" not in name\n                and \"layer3\" not in name\n                and \"layer4\" not in name\n            ):\n                parameter.requires_grad_(False)\n        if return_interm_layers:\n            # return_layers = {\"layer1\": \"0\", \"layer2\": \"1\", 
\"layer3\": \"2\", \"layer4\": \"3\"}\n            return_layers = {\"layer2\": \"0\", \"layer3\": \"1\", \"layer4\": \"2\"}\n            self.strides = [8, 16, 32]\n            self.num_channels = [512, 1024, 2048]\n        else:\n            return_layers = {\"layer4\": \"0\"}\n            self.strides = [32]\n            self.num_channels = [2048]\n        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)\n\n    def forward(self, tensor_list: NestedTensor):\n        xs = self.body(tensor_list.tensors)\n        out: Dict[str, NestedTensor] = {}\n        for name, x in xs.items():\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n            out[name] = NestedTensor(x, mask)\n        return out\n\n\nclass Backbone(BackboneBase):\n    \"\"\"ResNet backbone with frozen BatchNorm.\"\"\"\n\n    def __init__(\n        self,\n        name: str,\n        train_backbone: bool,\n        return_interm_layers: bool,\n        dilation: bool,\n    ):\n        norm_layer = FrozenBatchNorm2d\n        backbone = getattr(torchvision.models, name)(\n            replace_stride_with_dilation=[False, False, dilation],\n            pretrained=is_main_process(),\n            norm_layer=norm_layer,\n        )\n        assert name not in (\"resnet18\", \"resnet34\"), \"number of channels are hard coded\"\n        super().__init__(backbone, train_backbone, return_interm_layers)\n        if dilation:\n            self.strides[-1] = self.strides[-1] // 2\n\n\nclass TransformerBackbone(nn.Module):\n    def __init__(\n        self, backbone: str, train_backbone: bool, return_interm_layers: bool, args\n    ):\n        super().__init__()\n        out_indices = (1, 2, 3)\n        if backbone == \"swin_tiny\":\n            backbone = SwinTransformer(\n                embed_dim=96,\n                depths=[2, 2, 6, 2],\n                num_heads=[3, 6, 12, 24],\n                window_size=7,\n                ape=False,\n                drop_path_rate=args.drop_path_rate,\n                patch_norm=True,\n                use_checkpoint=True,\n                out_indices=out_indices,\n            )\n            embed_dim = 96\n            # backbone.init_weights(args.pretrained_backbone_path)\n        elif backbone == \"swin_small\":\n            backbone = SwinTransformer(\n                embed_dim=96,\n                depths=[2, 2, 18, 2],\n                num_heads=[3, 6, 12, 24],\n                window_size=7,\n                ape=False,\n                drop_path_rate=args.drop_path_rate,\n                patch_norm=True,\n                use_checkpoint=True,\n                out_indices=out_indices,\n            )\n            embed_dim = 96\n            # backbone.init_weights(args.pretrained_backbone_path)\n        elif backbone == \"swin_large\":\n            backbone = SwinTransformer(\n                embed_dim=192,\n                depths=[2, 2, 18, 2],\n                num_heads=[6, 12, 24, 48],\n                window_size=7,\n                ape=False,\n                drop_path_rate=args.drop_path_rate,\n                patch_norm=True,\n                use_checkpoint=True,\n                out_indices=out_indices,\n            )\n            embed_dim = 192\n            # backbone.init_weights(args.pretrained_backbone_path)\n        elif backbone == \"swin_large_window12\":\n            backbone = SwinTransformer(\n                pretrain_img_size=384,\n                
embed_dim=192,\n                depths=[2, 2, 18, 2],\n                num_heads=[6, 12, 24, 48],\n                window_size=12,\n                ape=False,\n                drop_path_rate=args.drop_path_rate,\n                patch_norm=True,\n                use_checkpoint=True,\n                out_indices=out_indices,\n            )\n            embed_dim = 192\n            # backbone.init_weights(args.pretrained_backbone_path)\n        else:\n            raise NotImplementedError\n\n        for name, parameter in backbone.named_parameters():\n            # TODO: freeze some layers?\n            if not train_backbone:\n                parameter.requires_grad_(False)\n\n        if return_interm_layers:\n\n            self.strides = [8, 16, 32]\n            self.num_channels = [\n                embed_dim * 2,\n                embed_dim * 4,\n                embed_dim * 8,\n            ]\n        else:\n            self.strides = [32]\n            self.num_channels = [embed_dim * 8]\n\n        self.body = backbone\n\n    def forward(self, tensor_list: NestedTensor):\n        xs = self.body(tensor_list.tensors)\n\n        out: Dict[str, NestedTensor] = {}\n        for name, x in xs.items():\n            m = tensor_list.mask\n            assert m is not None\n            mask = F.interpolate(m[None].float(), size=x.shape[-2:]).to(torch.bool)[0]\n            out[name] = NestedTensor(x, mask)\n        return out\n\n\nclass Joiner(nn.Sequential):\n    def __init__(self, backbone, position_embedding):\n        super().__init__(backbone, position_embedding)\n        self.strides = backbone.strides\n        self.num_channels = backbone.num_channels\n\n    def forward(self, tensor_list: NestedTensor):\n        xs = self[0](tensor_list)\n        out: List[NestedTensor] = []\n        pos = []\n        for name, x in sorted(xs.items()):\n            out.append(x)\n\n        # position encoding\n        for x in out:\n            pos.append(self[1](x).to(x.tensors.dtype))\n\n        return out, pos\n\n\ndef build_backbone(args):\n    position_embedding = build_position_encoding(args)\n    train_backbone = False\n    return_interm_layers = args.masks or (args.num_feature_levels > 1)\n    if \"resnet\" in args.backbone:\n        backbone = Backbone(\n            args.backbone, train_backbone, return_interm_layers, args.dilation,\n        )\n    else:\n        backbone = TransformerBackbone(\n            args.backbone, train_backbone, return_interm_layers, args\n        )\n    model = Joiner(backbone, position_embedding)\n    return model\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/deformable_detr.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nDeformable DETR model and criterion classes.\n\"\"\"\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport math\n\nfrom .util import box_ops\nfrom .util.misc import (\n    NestedTensor,\n    nested_tensor_from_tensor_list,\n    accuracy,\n    get_world_size,\n    interpolate,\n    is_dist_avail_and_initialized,\n    inverse_sigmoid,\n)\n\nfrom .backbone import build_backbone\nfrom .matcher import build_matcher\nfrom .segmentation import (\n    DETRsegm,\n    PostProcessPanoptic,\n    PostProcessSegm,\n    dice_loss,\n    sigmoid_focal_loss,\n)\nfrom .deformable_transformer import build_deforamble_transformer\nimport copy\n\n\ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])\n\n\nclass DeformableDETR(nn.Module):\n    \"\"\" This is the Deformable DETR module that performs object detection \"\"\"\n\n    def __init__(\n        self,\n        backbone,\n        transformer,\n        num_classes,\n        num_feature_levels,\n        aux_loss=True,\n        with_box_refine=False,\n        two_stage=False,\n        num_queries_one2one=300,\n        num_queries_one2many=0,\n        mixed_selection=False,\n    ):\n        \"\"\" Initializes the model.\n        Parameters:\n            backbone: torch module of the backbone to be used. See backbone.py\n            transformer: torch module of the transformer architecture. 
See transformer.py\n            num_classes: number of object classes\n            aux_loss: True if auxiliary decoding losses (loss at each decoder layer) are to be used.\n            with_box_refine: iterative bounding box refinement\n            two_stage: two-stage Deformable DETR\n            num_queries_one2one: number of object queries for one-to-one matching part\n            num_queries_one2many: number of object queries for one-to-many matching part\n            mixed_selection: a trick for Deformable DETR two stage\n\n        \"\"\"\n        super().__init__()\n        num_queries = num_queries_one2one + num_queries_one2many\n        self.num_queries = num_queries\n        self.transformer = transformer\n        hidden_dim = transformer.d_model\n        self.class_embed = nn.Linear(hidden_dim, num_classes)\n        self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)\n        self.num_feature_levels = num_feature_levels\n        if not two_stage:\n            self.query_embed = nn.Embedding(num_queries, hidden_dim * 2)\n        elif mixed_selection:\n            self.query_embed = nn.Embedding(num_queries, hidden_dim)\n        if num_feature_levels > 1:\n            num_backbone_outs = len(backbone.strides)\n            input_proj_list = []\n            for _ in range(num_backbone_outs):\n                in_channels = backbone.num_channels[_]\n                input_proj_list.append(\n                    nn.Sequential(\n                        nn.Conv2d(in_channels, hidden_dim, kernel_size=1),\n                        nn.GroupNorm(32, hidden_dim),\n                    )\n                )\n            for _ in range(num_feature_levels - num_backbone_outs):\n                input_proj_list.append(\n                    nn.Sequential(\n                        nn.Conv2d(\n                            in_channels, hidden_dim, kernel_size=3, stride=2, padding=1\n                        ),\n                        nn.GroupNorm(32, hidden_dim),\n                    )\n                )\n                in_channels = hidden_dim\n            self.input_proj = nn.ModuleList(input_proj_list)\n        else:\n            self.input_proj = nn.ModuleList(\n                [\n                    nn.Sequential(\n                        nn.Conv2d(backbone.num_channels[0], hidden_dim, kernel_size=1),\n                        nn.GroupNorm(32, hidden_dim),\n                    )\n                ]\n            )\n        self.backbone = backbone\n        self.aux_loss = aux_loss\n        self.with_box_refine = with_box_refine\n        self.two_stage = two_stage\n\n        prior_prob = 0.01\n        bias_value = -math.log((1 - prior_prob) / prior_prob)\n        self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n        nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n        nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n        for proj in self.input_proj:\n            nn.init.xavier_uniform_(proj[0].weight, gain=1)\n            nn.init.constant_(proj[0].bias, 0)\n\n        # if two-stage, the last class_embed and bbox_embed is for region proposal generation\n        num_pred = (\n            (transformer.decoder.num_layers + 1)\n            if two_stage\n            else transformer.decoder.num_layers\n        )\n        if with_box_refine:\n            self.class_embed = _get_clones(self.class_embed, num_pred)\n            self.bbox_embed = _get_clones(self.bbox_embed, num_pred)\n            nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], 
-2.0)\n            # hack implementation for iterative bounding box refinement\n            self.transformer.decoder.bbox_embed = self.bbox_embed\n        else:\n            nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n            self.class_embed = nn.ModuleList(\n                [self.class_embed for _ in range(num_pred)]\n            )\n            self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n            self.transformer.decoder.bbox_embed = None\n        if two_stage:\n            # hack implementation for two-stage\n            self.transformer.decoder.class_embed = self.class_embed\n            for box_embed in self.bbox_embed:\n                nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)\n        self.num_queries_one2one = num_queries_one2one\n        self.mixed_selection = mixed_selection\n\n    def forward(self, samples: NestedTensor):\n        \"\"\" The forward expects a NestedTensor, which consists of:\n               - samples.tensors: batched images, of shape [batch_size x 3 x H x W]\n               - samples.mask: a binary mask of shape [batch_size x H x W], containing 1 on padded pixels\n\n            It returns a dict with the following elements:\n               - \"pred_logits\": the classification logits (including no-object) for all queries.\n                                Shape = [batch_size x num_queries x (num_classes + 1)]\n               - \"pred_boxes\": The normalized box coordinates for all queries, represented as\n                               (center_x, center_y, width, height). These values are normalized in [0, 1],\n                               relative to the size of each individual image (disregarding possible padding).\n                               See PostProcess for information on how to retrieve the unnormalized bounding box.\n               - \"aux_outputs\": Optional, only returned when auxiliary losses are activated. 
It is a list of\n                                dictionnaries containing the two above keys for each decoder layer.\n        \"\"\"\n        if not isinstance(samples, NestedTensor):\n            samples = nested_tensor_from_tensor_list(samples)\n        features, pos = self.backbone(samples)\n\n        srcs = []\n        masks = []\n        for l, feat in enumerate(features):\n            src, mask = feat.decompose()\n            srcs.append(self.input_proj[l](src))\n            masks.append(mask)\n            assert mask is not None\n        if self.num_feature_levels > len(srcs):\n            _len_srcs = len(srcs)\n            for l in range(_len_srcs, self.num_feature_levels):\n                if l == _len_srcs:\n                    src = self.input_proj[l](features[-1].tensors)\n                else:\n                    src = self.input_proj[l](srcs[-1])\n                m = samples.mask\n                mask = F.interpolate(m[None].float(), size=src.shape[-2:]).to(\n                    torch.bool\n                )[0]\n                pos_l = self.backbone[1](NestedTensor(src, mask)).to(src.dtype)\n                srcs.append(src)\n                masks.append(mask)\n                pos.append(pos_l)\n\n        query_embeds = None\n        if not self.two_stage or self.mixed_selection:\n            query_embeds = self.query_embed.weight[0 : self.num_queries, :]\n\n        # make attn mask\n        \"\"\" attention mask to prevent information leakage\n        \"\"\"\n        self_attn_mask = (\n            torch.zeros([self.num_queries, self.num_queries,]).bool().to(src.device)\n        )\n        self_attn_mask[self.num_queries_one2one :, 0 : self.num_queries_one2one,] = True\n        self_attn_mask[0 : self.num_queries_one2one, self.num_queries_one2one :,] = True\n\n        (\n            hs,\n            init_reference,\n            inter_references,\n            enc_outputs_class,\n            enc_outputs_coord_unact,\n        ) = self.transformer(srcs, masks, pos, query_embeds, self_attn_mask)\n\n        outputs_classes_one2one = []\n        outputs_coords_one2one = []\n        outputs_classes_one2many = []\n        outputs_coords_one2many = []\n        for lvl in range(hs.shape[0]):\n            if lvl == 0:\n                reference = init_reference\n            else:\n                reference = inter_references[lvl - 1]\n            reference = inverse_sigmoid(reference)\n            outputs_class = self.class_embed[lvl](hs[lvl])\n            tmp = self.bbox_embed[lvl](hs[lvl])\n            if reference.shape[-1] == 4:\n                tmp += reference\n            else:\n                assert reference.shape[-1] == 2\n                tmp[..., :2] += reference\n            outputs_coord = tmp.sigmoid()\n\n            outputs_classes_one2one.append(\n                outputs_class[:, 0 : self.num_queries_one2one]\n            )\n            outputs_classes_one2many.append(\n                outputs_class[:, self.num_queries_one2one :]\n            )\n            outputs_coords_one2one.append(\n                outputs_coord[:, 0 : self.num_queries_one2one]\n            )\n            outputs_coords_one2many.append(outputs_coord[:, self.num_queries_one2one :])\n        outputs_classes_one2one = torch.stack(outputs_classes_one2one)\n        outputs_coords_one2one = torch.stack(outputs_coords_one2one)\n        outputs_classes_one2many = torch.stack(outputs_classes_one2many)\n        outputs_coords_one2many = torch.stack(outputs_coords_one2many)\n\n        out = {\n            
\"pred_logits\": outputs_classes_one2one[-1],\n            \"pred_boxes\": outputs_coords_one2one[-1],\n            \"pred_logits_one2many\": outputs_classes_one2many[-1],\n            \"pred_boxes_one2many\": outputs_coords_one2many[-1],\n        }\n        if self.aux_loss:\n            out[\"aux_outputs\"] = self._set_aux_loss(\n                outputs_classes_one2one, outputs_coords_one2one\n            )\n            out[\"aux_outputs_one2many\"] = self._set_aux_loss(\n                outputs_classes_one2many, outputs_coords_one2many\n            )\n\n        if self.two_stage:\n            enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n            out[\"enc_outputs\"] = {\n                \"pred_logits\": enc_outputs_class,\n                \"pred_boxes\": enc_outputs_coord,\n            }\n        return out\n\n    @torch.jit.unused\n    def _set_aux_loss(self, outputs_class, outputs_coord):\n        # this is a workaround to make torchscript happy, as torchscript\n        # doesn't support dictionary with non-homogeneous values, such\n        # as a dict having both a Tensor and a list.\n        return [\n            {\"pred_logits\": a, \"pred_boxes\": b}\n            for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n        ]\n\n\nclass SetCriterion(nn.Module):\n    \"\"\" This class computes the loss for DETR.\n    The process happens in two steps:\n        1) we compute hungarian assignment between ground truth boxes and the outputs of the model\n        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)\n    \"\"\"\n\n    def __init__(self, num_classes, matcher, weight_dict, losses, focal_alpha=0.25):\n        \"\"\" Create the criterion.\n        Parameters:\n            num_classes: number of object categories, omitting the special no-object category\n            matcher: module able to compute a matching between targets and proposals\n            weight_dict: dict containing as key the names of the losses and as values their relative weight.\n            losses: list of all the losses to be applied. 
See get_loss for list of available losses.\n            focal_alpha: alpha in Focal Loss\n        \"\"\"\n        super().__init__()\n        self.num_classes = num_classes\n        self.matcher = matcher\n        self.weight_dict = weight_dict\n        self.losses = losses\n        self.focal_alpha = focal_alpha\n\n    def loss_labels(self, outputs, targets, indices, num_boxes, log=True):\n        \"\"\"Classification loss (NLL)\n        targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n        \"\"\"\n        assert \"pred_logits\" in outputs\n        src_logits = outputs[\"pred_logits\"]\n\n        idx = self._get_src_permutation_idx(indices)\n        target_classes_o = torch.cat(\n            [t[\"labels\"][J] for t, (_, J) in zip(targets, indices)]\n        )\n        target_classes = torch.full(\n            src_logits.shape[:2],\n            self.num_classes,\n            dtype=torch.int64,\n            device=src_logits.device,\n        )\n        target_classes[idx] = target_classes_o\n\n        target_classes_onehot = torch.zeros(\n            [src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n            dtype=src_logits.dtype,\n            layout=src_logits.layout,\n            device=src_logits.device,\n        )\n        target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n\n        target_classes_onehot = target_classes_onehot[:, :, :-1]\n        loss_ce = (\n            sigmoid_focal_loss(\n                src_logits,\n                target_classes_onehot,\n                num_boxes,\n                alpha=self.focal_alpha,\n                gamma=2,\n            )\n            * src_logits.shape[1]\n        )\n        losses = {\"loss_ce\": loss_ce}\n\n        if log:\n            # TODO this should probably be a separate loss, not hacked in this one here\n            losses[\"class_error\"] = 100 - accuracy(src_logits[idx], target_classes_o)[0]\n        return losses\n\n    @torch.no_grad()\n    def loss_cardinality(self, outputs, targets, indices, num_boxes):\n        \"\"\" Compute the cardinality error, ie the absolute error in the number of predicted non-empty boxes\n        This is not really a loss, it is intended for logging purposes only. 
It doesn't propagate gradients\n        \"\"\"\n        pred_logits = outputs[\"pred_logits\"]\n        device = pred_logits.device\n        tgt_lengths = torch.as_tensor(\n            [len(v[\"labels\"]) for v in targets], device=device\n        )\n        # Count the number of predictions that are NOT \"no-object\" (which is the last class)\n        card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)\n        card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())\n        losses = {\"cardinality_error\": card_err}\n        return losses\n\n    def loss_boxes(self, outputs, targets, indices, num_boxes):\n        \"\"\"Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss\n           targets dicts must contain the key \"boxes\" containing a tensor of dim [nb_target_boxes, 4]\n           The target boxes are expected in format (center_x, center_y, h, w), normalized by the image size.\n        \"\"\"\n        assert \"pred_boxes\" in outputs\n        idx = self._get_src_permutation_idx(indices)\n        src_boxes = outputs[\"pred_boxes\"][idx]\n        target_boxes = torch.cat(\n            [t[\"boxes\"][i] for t, (_, i) in zip(targets, indices)], dim=0\n        )\n\n        loss_bbox = F.l1_loss(src_boxes, target_boxes, reduction=\"none\")\n\n        losses = {}\n        losses[\"loss_bbox\"] = loss_bbox.sum() / num_boxes\n\n        loss_giou = 1 - torch.diag(\n            box_ops.generalized_box_iou(\n                box_ops.box_cxcywh_to_xyxy(src_boxes),\n                box_ops.box_cxcywh_to_xyxy(target_boxes),\n            )\n        )\n        losses[\"loss_giou\"] = loss_giou.sum() / num_boxes\n        return losses\n\n    def loss_masks(self, outputs, targets, indices, num_boxes):\n        \"\"\"Compute the losses related to the masks: the focal loss and the dice loss.\n           targets dicts must contain the key \"masks\" containing a tensor of dim [nb_target_boxes, h, w]\n        \"\"\"\n        assert \"pred_masks\" in outputs\n\n        src_idx = self._get_src_permutation_idx(indices)\n        tgt_idx = self._get_tgt_permutation_idx(indices)\n\n        src_masks = outputs[\"pred_masks\"]\n\n        # TODO use valid to mask invalid areas due to padding in loss\n        target_masks, valid = nested_tensor_from_tensor_list(\n            [t[\"masks\"] for t in targets]\n        ).decompose()\n        target_masks = target_masks.to(src_masks)\n\n        src_masks = src_masks[src_idx]\n        # upsample predictions to the target size\n        src_masks = interpolate(\n            src_masks[:, None],\n            size=target_masks.shape[-2:],\n            mode=\"bilinear\",\n            align_corners=False,\n        )\n        src_masks = src_masks[:, 0].flatten(1)\n\n        target_masks = target_masks[tgt_idx].flatten(1)\n\n        losses = {\n            \"loss_mask\": sigmoid_focal_loss(src_masks, target_masks, num_boxes),\n            \"loss_dice\": dice_loss(src_masks, target_masks, num_boxes),\n        }\n        return losses\n\n    def _get_src_permutation_idx(self, indices):\n        # permute predictions following indices\n        batch_idx = torch.cat(\n            [torch.full_like(src, i) for i, (src, _) in enumerate(indices)]\n        )\n        src_idx = torch.cat([src for (src, _) in indices])\n        return batch_idx, src_idx\n\n    def _get_tgt_permutation_idx(self, indices):\n        # permute targets following indices\n        batch_idx = torch.cat(\n            [torch.full_like(tgt, i) for i, (_, 
tgt) in enumerate(indices)]\n        )\n        tgt_idx = torch.cat([tgt for (_, tgt) in indices])\n        return batch_idx, tgt_idx\n\n    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):\n        loss_map = {\n            \"labels\": self.loss_labels,\n            \"cardinality\": self.loss_cardinality,\n            \"boxes\": self.loss_boxes,\n            \"masks\": self.loss_masks,\n        }\n        assert loss in loss_map, f\"do you really want to compute {loss} loss?\"\n        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)\n\n    def forward(self, outputs, targets):\n        \"\"\" This performs the loss computation.\n        Parameters:\n             outputs: dict of tensors, see the output specification of the model for the format\n             targets: list of dicts, such that len(targets) == batch_size.\n                      The expected keys in each dict depends on the losses applied, see each loss' doc\n        \"\"\"\n        outputs_without_aux = {\n            k: v\n            for k, v in outputs.items()\n            if k != \"aux_outputs\" and k != \"enc_outputs\"\n        }\n\n        # Retrieve the matching between the outputs of the last layer and the targets\n        indices = self.matcher(outputs_without_aux, targets)\n\n        # Compute the average number of target boxes accross all nodes, for normalization purposes\n        num_boxes = sum(len(t[\"labels\"]) for t in targets)\n        num_boxes = torch.as_tensor(\n            [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n        )\n        if is_dist_avail_and_initialized():\n            torch.distributed.all_reduce(num_boxes)\n        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n        # Compute all the requested losses\n        losses = {}\n        for loss in self.losses:\n            kwargs = {}\n            losses.update(\n                self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs)\n            )\n\n        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n        if \"aux_outputs\" in outputs:\n            for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n                indices = self.matcher(aux_outputs, targets)\n                for loss in self.losses:\n                    if loss == \"masks\":\n                        # Intermediate masks losses are too costly to compute, we ignore them.\n                        continue\n                    kwargs = {}\n                    if loss == \"labels\":\n                        # Logging is enabled only for the last layer\n                        kwargs[\"log\"] = False\n                    l_dict = self.get_loss(\n                        loss, aux_outputs, targets, indices, num_boxes, **kwargs\n                    )\n                    l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n                    losses.update(l_dict)\n\n        if \"enc_outputs\" in outputs:\n            enc_outputs = outputs[\"enc_outputs\"]\n            bin_targets = copy.deepcopy(targets)\n            for bt in bin_targets:\n                bt[\"labels\"] = torch.zeros_like(bt[\"labels\"])\n            indices = self.matcher(enc_outputs, bin_targets)\n            for loss in self.losses:\n                if loss == \"masks\":\n                    # Intermediate masks losses are too costly to compute, we ignore them.\n                    continue\n                kwargs = {}\n           
     if loss == \"labels\":\n                    # Logging is enabled only for the last layer\n                    kwargs[\"log\"] = False\n                l_dict = self.get_loss(\n                    loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs\n                )\n                l_dict = {k + f\"_enc\": v for k, v in l_dict.items()}\n                losses.update(l_dict)\n\n        return losses\n\n\nclass PostProcess(nn.Module):\n    \"\"\" This module converts the model's output into the format expected by the coco api\"\"\"\n\n    def __init__(self, topk=100):\n        super().__init__()\n        self.topk = topk\n        print(\"topk for eval:\", self.topk)\n\n    @torch.no_grad()\n    def forward(self, outputs, target_sizes):\n        \"\"\" Perform the computation\n        Parameters:\n            outputs: raw outputs of the model\n            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch\n                          For evaluation, this must be the original image size (before any data augmentation)\n                          For visualization, this should be the image size after data augment, but before padding\n        \"\"\"\n        out_logits, out_bbox = outputs[\"pred_logits\"], outputs[\"pred_boxes\"]\n\n        assert len(out_logits) == len(target_sizes)\n        assert target_sizes.shape[1] == 2\n\n        prob = out_logits.sigmoid()\n        topk_values, topk_indexes = torch.topk(\n            prob.view(out_logits.shape[0], -1), self.topk, dim=1\n        )\n        scores = topk_values\n        topk_boxes = topk_indexes // out_logits.shape[2]\n        labels = topk_indexes % out_logits.shape[2]\n        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)\n        boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n        # and from relative [0, 1] to absolute [0, height] coordinates\n        img_h, img_w = target_sizes.unbind(1)\n        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)\n        boxes = boxes * scale_fct[:, None, :]\n\n        results = [\n            {\"scores\": s, \"labels\": l, \"boxes\": b}\n            for s, l, b in zip(scores, labels, boxes)\n        ]\n\n        return results\n\n\nclass MLP(nn.Module):\n    \"\"\" Very simple multi-layer perceptron (also called FFN)\"\"\"\n\n    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        self.layers = nn.ModuleList(\n            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n        )\n\n    def forward(self, x):\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        return x\n\n\ndef build(args):\n    backbone = build_backbone(args)\n\n    transformer = build_deforamble_transformer(args)\n    model = DeformableDETR(\n        backbone,\n        transformer,\n        num_classes=args.num_classes,\n        num_feature_levels=args.num_feature_levels,\n        aux_loss=args.aux_loss,\n        with_box_refine=args.with_box_refine,\n        two_stage=args.two_stage,\n        num_queries_one2one=args.num_queries_one2one,\n        num_queries_one2many=args.num_queries_one2many,\n        mixed_selection=args.mixed_selection,\n    )\n\n    box_postprocessor = PostProcess(topk=args.topk)\n\n    return model, box_postprocessor\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/deformable_transformer.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\nimport copy\nfrom typing import Optional, List\nimport math\n\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn, Tensor\nimport torch.utils.checkpoint as checkpoint\nfrom torch.nn.init import xavier_uniform_, constant_, uniform_, normal_\n\nfrom .util.misc import inverse_sigmoid\nfrom projects.instance_segment_anything.ops.modules import MSDeformAttn\n\n\nclass DeformableTransformer(nn.Module):\n    def __init__(\n        self,\n        d_model=256,\n        nhead=8,\n        num_encoder_layers=6,\n        num_decoder_layers=6,\n        dim_feedforward=1024,\n        dropout=0.1,\n        activation=\"relu\",\n        return_intermediate_dec=False,\n        num_feature_levels=4,\n        dec_n_points=4,\n        enc_n_points=4,\n        two_stage=False,\n        two_stage_num_proposals=300,\n        look_forward_twice=False,\n        mixed_selection=False,\n        use_checkpoint=False,\n    ):\n        super().__init__()\n\n        self.d_model = d_model\n        self.nhead = nhead\n        self.two_stage = two_stage\n        self.two_stage_num_proposals = two_stage_num_proposals\n\n        encoder_layer = DeformableTransformerEncoderLayer(\n            d_model,\n            dim_feedforward,\n            dropout,\n            activation,\n            num_feature_levels,\n            nhead,\n            enc_n_points,\n        )\n        self.encoder = DeformableTransformerEncoder(\n            encoder_layer, num_encoder_layers, use_checkpoint\n        )\n\n        decoder_layer = DeformableTransformerDecoderLayer(\n            d_model,\n            dim_feedforward,\n            dropout,\n            activation,\n            num_feature_levels,\n            nhead,\n            dec_n_points,\n        )\n        self.decoder = DeformableTransformerDecoder(\n            decoder_layer,\n            num_decoder_layers,\n            return_intermediate_dec,\n            look_forward_twice,\n            use_checkpoint,\n        )\n\n        self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))\n\n        if two_stage:\n            self.enc_output = nn.Linear(d_model, d_model)\n            self.enc_output_norm = nn.LayerNorm(d_model)\n            self.pos_trans = nn.Linear(d_model * 2, d_model * 2)\n            self.pos_trans_norm = nn.LayerNorm(d_model * 2)\n        else:\n            self.reference_points = nn.Linear(d_model, 2)\n\n        self.mixed_selection = mixed_selection\n        self._reset_parameters()\n\n    def _reset_parameters(self):\n        for p in self.parameters():\n            if p.dim() > 1:\n                nn.init.xavier_uniform_(p)\n        for m in self.modules():\n            if isinstance(m, MSDeformAttn):\n                m._reset_parameters()\n        
if not self.two_stage:\n            xavier_uniform_(self.reference_points.weight.data, gain=1.0)\n            constant_(self.reference_points.bias.data, 0.0)\n        normal_(self.level_embed)\n\n    def get_proposal_pos_embed(self, proposals):\n        num_pos_feats = 128\n        temperature = 10000\n        scale = 2 * math.pi\n\n        dim_t = torch.arange(\n            num_pos_feats, dtype=torch.float32, device=proposals.device\n        )\n        dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n        # N, L, 4\n        proposals = proposals.sigmoid() * scale\n        # N, L, 4, 128\n        pos = proposals[:, :, :, None] / dim_t\n        # N, L, 4, 64, 2\n        pos = torch.stack(\n            (pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4\n        ).flatten(2)\n        return pos\n\n    def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n        N_, S_, C_ = memory.shape\n        base_scale = 4.0\n        proposals = []\n        _cur = 0\n        for lvl, (H_, W_) in enumerate(spatial_shapes):\n            mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H_ * W_)].view(\n                N_, H_, W_, 1\n            )\n            valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n            valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n            grid_y, grid_x = torch.meshgrid(\n                torch.linspace(\n                    0, H_ - 1, H_, dtype=torch.float32, device=memory.device\n                ),\n                torch.linspace(\n                    0, W_ - 1, W_, dtype=torch.float32, device=memory.device\n                ),\n            )\n            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n            scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(\n                N_, 1, 1, 2\n            )\n            grid = (grid.unsqueeze(0).expand(N_, -1, -1, -1) + 0.5) / scale\n            wh = torch.ones_like(grid) * 0.05 * (2.0 ** lvl)\n            proposal = torch.cat((grid, wh), -1).view(N_, -1, 4)\n            proposals.append(proposal)\n            _cur += H_ * W_\n        output_proposals = torch.cat(proposals, 1)\n        output_proposals_valid = (\n            (output_proposals > 0.01) & (output_proposals < 0.99)\n        ).all(-1, keepdim=True)\n        output_proposals = torch.log(output_proposals / (1 - output_proposals))\n        output_proposals = output_proposals.masked_fill(\n            memory_padding_mask.unsqueeze(-1), float(\"inf\")\n        )\n        output_proposals = output_proposals.masked_fill(\n            ~output_proposals_valid, float(\"inf\")\n        )\n\n        output_memory = memory\n        output_memory = output_memory.masked_fill(\n            memory_padding_mask.unsqueeze(-1), float(0)\n        )\n        output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n        output_memory = self.enc_output_norm(self.enc_output(output_memory))\n        return output_memory, output_proposals\n\n    def get_valid_ratio(self, mask):\n        _, H, W = mask.shape\n        valid_H = torch.sum(~mask[:, :, 0], 1)\n        valid_W = torch.sum(~mask[:, 0, :], 1)\n        valid_ratio_h = valid_H.float() / H\n        valid_ratio_w = valid_W.float() / W\n        valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n        return valid_ratio\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(self, srcs, masks, pos_embeds, query_embed=None, self_attn_mask=None):\n\n        # 
prepare input for encoder\n        src_flatten = []\n        mask_flatten = []\n        lvl_pos_embed_flatten = []\n        spatial_shapes = []\n        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):\n            bs, c, h, w = src.shape\n            spatial_shape = (h, w)\n            spatial_shapes.append(spatial_shape)\n            src = src.flatten(2).transpose(1, 2)\n            mask = mask.flatten(1)\n            pos_embed = pos_embed.flatten(2).transpose(1, 2)\n            lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)\n            lvl_pos_embed_flatten.append(lvl_pos_embed)\n            src_flatten.append(src)\n            mask_flatten.append(mask)\n        src_flatten = torch.cat(src_flatten, 1)\n        mask_flatten = torch.cat(mask_flatten, 1)\n        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n        spatial_shapes = torch.as_tensor(\n            spatial_shapes, dtype=torch.long, device=src_flatten.device\n        )\n        level_start_index = torch.cat(\n            (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n        )\n        valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1)\n\n        # encoder\n        memory = self.encoder(\n            src_flatten,\n            spatial_shapes,\n            level_start_index,\n            valid_ratios,\n            lvl_pos_embed_flatten,\n            mask_flatten,\n        )\n\n        # prepare input for decoder\n        bs, _, c = memory.shape\n        if self.two_stage:\n            output_memory, output_proposals = self.gen_encoder_output_proposals(\n                memory, mask_flatten, spatial_shapes\n            )\n\n            # hack implementation for two-stage Deformable DETR\n            enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](\n                output_memory\n            )\n            enc_outputs_coord_unact = (\n                self.decoder.bbox_embed[self.decoder.num_layers](output_memory)\n                + output_proposals\n            )\n\n            topk = self.two_stage_num_proposals\n            topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]\n            topk_coords_unact = torch.gather(\n                enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n            )\n            topk_coords_unact = topk_coords_unact.detach()\n            reference_points = topk_coords_unact.sigmoid()\n            init_reference_out = reference_points\n            pos_trans_out = self.pos_trans_norm(\n                self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))\n            )\n\n            if not self.mixed_selection:\n                query_embed, tgt = torch.split(pos_trans_out, c, dim=2)\n            else:\n                # query_embed here is the content embed for deformable DETR\n                tgt = query_embed.unsqueeze(0).expand(bs, -1, -1)\n                query_embed, _ = torch.split(pos_trans_out, c, dim=2)\n        else:\n            query_embed, tgt = torch.split(query_embed, c, dim=1)\n            query_embed = query_embed.unsqueeze(0).expand(bs, -1, -1)\n            tgt = tgt.unsqueeze(0).expand(bs, -1, -1)\n            reference_points = self.reference_points(query_embed).sigmoid()\n            init_reference_out = reference_points\n\n        # decoder\n        hs, inter_references = self.decoder(\n            tgt,\n            reference_points,\n            memory,\n            spatial_shapes,\n            
level_start_index,\n            valid_ratios,\n            query_embed,\n            mask_flatten,\n            self_attn_mask,\n        )\n\n        inter_references_out = inter_references\n        if self.two_stage:\n            return (\n                hs,\n                init_reference_out,\n                inter_references_out,\n                enc_outputs_class,\n                enc_outputs_coord_unact,\n            )\n        return hs, init_reference_out, inter_references_out, None, None\n\n\nclass DeformableTransformerEncoderLayer(nn.Module):\n    def __init__(\n        self,\n        d_model=256,\n        d_ffn=1024,\n        dropout=0.1,\n        activation=\"relu\",\n        n_levels=4,\n        n_heads=8,\n        n_points=4,\n    ):\n        super().__init__()\n\n        # self attention\n        self.self_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation)\n        self.dropout2 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout3 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None else tensor + pos\n\n    def forward_ffn(self, src):\n        src2 = self.linear2(self.dropout2(self.activation(self.linear1(src))))\n        src = src + self.dropout3(src2)\n        src = self.norm2(src)\n        return src\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        self,\n        src,\n        pos,\n        reference_points,\n        spatial_shapes,\n        level_start_index,\n        padding_mask=None,\n    ):\n        # self attention\n        src2 = self.self_attn(\n            self.with_pos_embed(src, pos),\n            reference_points,\n            src,\n            spatial_shapes,\n            level_start_index,\n            padding_mask,\n        )\n        src = src + self.dropout1(src2)\n        src = self.norm1(src)\n\n        # ffn\n        src = self.forward_ffn(src)\n\n        return src\n\n\nclass DeformableTransformerEncoder(nn.Module):\n    def __init__(self, encoder_layer, num_layers, use_checkpoint=False):\n        super().__init__()\n        self.layers = _get_clones(encoder_layer, num_layers)\n        self.num_layers = num_layers\n        self.use_checkpoint = use_checkpoint\n\n    @staticmethod\n    def get_reference_points(spatial_shapes, valid_ratios, device):\n        reference_points_list = []\n        for lvl, (H_, W_) in enumerate(spatial_shapes):\n\n            ref_y, ref_x = torch.meshgrid(\n                torch.linspace(0.5, H_ - 0.5, H_, dtype=torch.float32, device=device),\n                torch.linspace(0.5, W_ - 0.5, W_, dtype=torch.float32, device=device),\n            )\n            ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)\n            ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)\n            ref = torch.stack((ref_x, ref_y), -1)\n            reference_points_list.append(ref)\n        reference_points = torch.cat(reference_points_list, 1)\n        reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n        return reference_points\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        self,\n        src,\n        spatial_shapes,\n        
level_start_index,\n        valid_ratios,\n        pos=None,\n        padding_mask=None,\n    ):\n        output = src\n        reference_points = self.get_reference_points(\n            spatial_shapes, valid_ratios, device=src.device\n        )\n        for _, layer in enumerate(self.layers):\n            if self.use_checkpoint:\n                output = checkpoint.checkpoint(\n                    layer,\n                    output,\n                    pos,\n                    reference_points,\n                    spatial_shapes,\n                    level_start_index,\n                    padding_mask,\n                )\n            else:\n                output = layer(\n                    output,\n                    pos,\n                    reference_points,\n                    spatial_shapes,\n                    level_start_index,\n                    padding_mask,\n                )\n\n        return output\n\n\nclass DeformableTransformerDecoderLayer(nn.Module):\n    def __init__(\n        self,\n        d_model=256,\n        d_ffn=1024,\n        dropout=0.1,\n        activation=\"relu\",\n        n_levels=4,\n        n_heads=8,\n        n_points=4,\n    ):\n        super().__init__()\n\n        # cross attention\n        self.cross_attn = MSDeformAttn(d_model, n_levels, n_heads, n_points)\n        self.dropout1 = nn.Dropout(dropout)\n        self.norm1 = nn.LayerNorm(d_model)\n\n        # self attention\n        self.self_attn = nn.MultiheadAttention(d_model, n_heads, dropout=dropout)\n        self.dropout2 = nn.Dropout(dropout)\n        self.norm2 = nn.LayerNorm(d_model)\n\n        # ffn\n        self.linear1 = nn.Linear(d_model, d_ffn)\n        self.activation = _get_activation_fn(activation)\n        self.dropout3 = nn.Dropout(dropout)\n        self.linear2 = nn.Linear(d_ffn, d_model)\n        self.dropout4 = nn.Dropout(dropout)\n        self.norm3 = nn.LayerNorm(d_model)\n\n    @staticmethod\n    def with_pos_embed(tensor, pos):\n        return tensor if pos is None else tensor + pos\n\n    def forward_ffn(self, tgt):\n        tgt2 = self.linear2(self.dropout3(self.activation(self.linear1(tgt))))\n        tgt = tgt + self.dropout4(tgt2)\n        tgt = self.norm3(tgt)\n        return tgt\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        self,\n        tgt,\n        query_pos,\n        reference_points,\n        src,\n        src_spatial_shapes,\n        level_start_index,\n        src_padding_mask=None,\n        self_attn_mask=None,\n    ):\n        # self attention\n        q = k = self.with_pos_embed(tgt, query_pos)\n        tgt2 = self.self_attn(\n            q.transpose(0, 1),\n            k.transpose(0, 1),\n            tgt.transpose(0, 1),\n            attn_mask=self_attn_mask,\n        )[0].transpose(0, 1)\n        tgt = tgt + self.dropout2(tgt2)\n        tgt = self.norm2(tgt)\n\n        # cross attention\n        tgt2 = self.cross_attn(\n            self.with_pos_embed(tgt, query_pos),\n            reference_points,\n            src,\n            src_spatial_shapes,\n            level_start_index,\n            src_padding_mask,\n        )\n        tgt = tgt + self.dropout1(tgt2)\n        tgt = self.norm1(tgt)\n\n        # ffn\n        tgt = self.forward_ffn(tgt)\n\n        return tgt\n\n\nclass DeformableTransformerDecoder(nn.Module):\n    def __init__(\n        self,\n        decoder_layer,\n        num_layers,\n        return_intermediate=False,\n        look_forward_twice=False,\n        use_checkpoint=False,\n    ):\n    
    super().__init__()\n        self.layers = _get_clones(decoder_layer, num_layers)\n        self.num_layers = num_layers\n        self.return_intermediate = return_intermediate\n        self.look_forward_twice = look_forward_twice\n        self.use_checkpoint = use_checkpoint\n        # hack implementation for iterative bounding box refinement and two-stage Deformable DETR\n        self.bbox_embed = None\n        self.class_embed = None\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        self,\n        tgt,\n        reference_points,\n        src,\n        src_spatial_shapes,\n        src_level_start_index,\n        src_valid_ratios,\n        query_pos=None,\n        src_padding_mask=None,\n        self_attn_mask=None,\n    ):\n        output = tgt\n\n        intermediate = []\n        intermediate_reference_points = []\n        for lid, layer in enumerate(self.layers):\n            if reference_points.shape[-1] == 4:\n                reference_points_input = (\n                    reference_points[:, :, None]\n                    * torch.cat([src_valid_ratios, src_valid_ratios], -1)[:, None]\n                )\n            else:\n                assert reference_points.shape[-1] == 2\n                reference_points_input = (\n                    reference_points[:, :, None] * src_valid_ratios[:, None]\n                )\n            if self.use_checkpoint:\n                output = checkpoint.checkpoint(\n                    layer,\n                    output,\n                    query_pos,\n                    reference_points_input,\n                    src,\n                    src_spatial_shapes,\n                    src_level_start_index,\n                    src_padding_mask,\n                    self_attn_mask,\n                )\n            else:\n                output = layer(\n                    output,\n                    query_pos,\n                    reference_points_input,\n                    src,\n                    src_spatial_shapes,\n                    src_level_start_index,\n                    src_padding_mask,\n                    self_attn_mask,\n                )\n\n            # hack implementation for iterative bounding box refinement\n            if self.bbox_embed is not None:\n                tmp = self.bbox_embed[lid](output)\n                if reference_points.shape[-1] == 4:\n                    new_reference_points = tmp + inverse_sigmoid(reference_points)\n                    new_reference_points = new_reference_points.sigmoid()\n                else:\n                    assert reference_points.shape[-1] == 2\n                    new_reference_points = tmp\n                    new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(\n                        reference_points\n                    )\n                    new_reference_points = new_reference_points.sigmoid()\n                reference_points = new_reference_points.detach()\n\n            if self.return_intermediate:\n                intermediate.append(output)\n                intermediate_reference_points.append(\n                    new_reference_points\n                    if self.look_forward_twice\n                    else reference_points\n                )\n\n        if self.return_intermediate:\n            return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n        return output, reference_points\n\n\ndef _get_clones(module, N):\n    return nn.ModuleList([copy.deepcopy(module) for i in 
range(N)])\n\n\ndef _get_activation_fn(activation):\n    \"\"\"Return an activation function given a string\"\"\"\n    if activation == \"relu\":\n        return F.relu\n    if activation == \"gelu\":\n        return F.gelu\n    if activation == \"glu\":\n        return F.glu\n    raise RuntimeError(f\"activation should be relu/gelu/glu, not {activation}.\")\n\n\ndef build_deforamble_transformer(args):\n    return DeformableTransformer(\n        d_model=args.hidden_dim,\n        nhead=args.nheads,\n        num_encoder_layers=args.enc_layers,\n        num_decoder_layers=args.dec_layers,\n        dim_feedforward=args.dim_feedforward,\n        dropout=args.dropout,\n        activation=\"relu\",\n        return_intermediate_dec=True,\n        num_feature_levels=args.num_feature_levels,\n        dec_n_points=args.dec_n_points,\n        enc_n_points=args.enc_n_points,\n        two_stage=args.two_stage,\n        two_stage_num_proposals=args.num_queries_one2one + args.num_queries_one2many,\n        mixed_selection=args.mixed_selection,\n        look_forward_twice=args.look_forward_twice,\n        use_checkpoint=args.use_checkpoint,\n    )\n\n"
  },
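  {
    "path": "projects/instance_segment_anything/models/hdetr/models/example_box_refinement_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the original H-Deformable-DETR\n# release. It re-implements, standalone, the per-layer iterative box refinement step used by\n# DeformableTransformerDecoder above, so the update rule can be inspected without the deformable\n# attention CUDA op. The local inverse_sigmoid is assumed to match util.misc.inverse_sigmoid.\nimport torch\n\n\ndef inverse_sigmoid(x, eps=1e-5):\n    # clamp to [0, 1], then take the logit\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1 / x2)\n\n\nif __name__ == \"__main__\":\n    reference_points = torch.rand(2, 300, 4)  # normalized (cx, cy, w, h) from the previous layer\n    delta = 0.05 * torch.randn(2, 300, 4)  # stand-in for self.bbox_embed[lid](output)\n    # offsets are predicted in inverse-sigmoid space and mapped back through a sigmoid\n    new_reference_points = (delta + inverse_sigmoid(reference_points)).sigmoid()\n    reference_points = new_reference_points.detach()  # detached before feeding the next layer\n    print(reference_points.shape)  # torch.Size([2, 300, 4])\n"
  },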
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/matcher.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nModules to compute the matching cost and solve the corresponding LSAP.\n\"\"\"\nimport torch\nfrom scipy.optimize import linear_sum_assignment\nfrom torch import nn\n\nfrom .util.box_ops import box_cxcywh_to_xyxy, generalized_box_iou\n\n\nclass HungarianMatcher(nn.Module):\n    \"\"\"This class computes an assignment between the targets and the predictions of the network\n\n    For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n    there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n    while the others are un-matched (and thus treated as non-objects).\n    \"\"\"\n\n    def __init__(\n        self, cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1\n    ):\n        \"\"\"Creates the matcher\n\n        Params:\n            cost_class: This is the relative weight of the classification error in the matching cost\n            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost\n            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost\n        \"\"\"\n        super().__init__()\n        self.cost_class = cost_class\n        self.cost_bbox = cost_bbox\n        self.cost_giou = cost_giou\n        assert (\n            cost_class != 0 or cost_bbox != 0 or cost_giou != 0\n        ), \"all costs cant be 0\"\n\n    def forward(self, outputs, targets):\n        \"\"\" Performs the matching\n\n        Params:\n            outputs: This is a dict that contains at least these entries:\n                 \"pred_logits\": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits\n                 \"pred_boxes\": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates\n\n            targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:\n                 \"labels\": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth\n                           objects in the target) containing the class labels\n                 \"boxes\": Tensor of dim [num_target_boxes, 4] containing the target box coordinates\n\n        Returns:\n            A list of size batch_size, containing tuples of (index_i, index_j) where:\n                - index_i is the indices of the selected predictions (in order)\n                - index_j is the indices of the corresponding selected targets (in order)\n            For each batch element, it holds:\n                len(index_i) = len(index_j) = min(num_queries, num_target_boxes)\n        \"\"\"\n        with torch.no_grad():\n            bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n            # We flatten to compute the cost matrices in a batch\n            out_prob = outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n            out_bbox = outputs[\"pred_boxes\"].flatten(\n                0, 1\n          
  )  # [batch_size * num_queries, 4]\n\n            # Also concat the target labels and boxes\n            tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n            tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n            # Compute the classification cost.\n            alpha = 0.25\n            gamma = 2.0\n            neg_cost_class = (\n                (1 - alpha) * (out_prob ** gamma) * (-(1 - out_prob + 1e-8).log())\n            )\n            pos_cost_class = (\n                alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n            )\n            cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n            # Compute the L1 cost between boxes\n            cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n            # Compute the giou cost between boxes\n            cost_giou = -generalized_box_iou(\n                box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)\n            )\n\n            # Final cost matrix\n            C = (\n                self.cost_bbox * cost_bbox\n                + self.cost_class * cost_class\n                + self.cost_giou * cost_giou\n            )\n            C = C.view(bs, num_queries, -1).cpu()\n\n            sizes = [len(v[\"boxes\"]) for v in targets]\n            indices = [\n                linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))\n            ]\n            return [\n                (\n                    torch.as_tensor(i, dtype=torch.int64),\n                    torch.as_tensor(j, dtype=torch.int64),\n                )\n                for i, j in indices\n            ]\n\n\ndef build_matcher(args):\n    return HungarianMatcher(\n        cost_class=args.set_cost_class,\n        cost_bbox=args.set_cost_bbox,\n        cost_giou=args.set_cost_giou,\n    )\n"
  },
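  {
    "path": "projects/instance_segment_anything/models/hdetr/models/example_matcher_usage.py",
    "content": "# NOTE: hypothetical usage sketch -- this file is not part of the original H-Deformable-DETR\n# release. It shows the tensor shapes HungarianMatcher expects and what it returns, assuming the\n# hdetr models package is importable (e.g. when run from the repository root).\nimport torch\n\nfrom projects.instance_segment_anything.models.hdetr.models.matcher import HungarianMatcher\n\nif __name__ == \"__main__\":\n    matcher = HungarianMatcher(cost_class=2.0, cost_bbox=5.0, cost_giou=2.0)\n    outputs = {\n        \"pred_logits\": torch.randn(2, 100, 80),  # [batch_size, num_queries, num_classes]\n        \"pred_boxes\": torch.rand(2, 100, 4),  # normalized (cx, cy, w, h)\n    }\n    targets = [\n        {\"labels\": torch.tensor([3, 17]), \"boxes\": torch.rand(2, 4)},\n        {\"labels\": torch.tensor([5]), \"boxes\": torch.rand(1, 4)},\n    ]\n    # one (pred_indices, target_indices) pair per image; each has min(num_queries, num_targets) entries\n    for i, (pred_idx, tgt_idx) in enumerate(matcher(outputs, targets)):\n        print(f\"image {i}: predictions {pred_idx.tolist()} -> targets {tgt_idx.tolist()}\")\n"
  },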
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/position_encoding.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nVarious positional encodings for the transformer.\n\"\"\"\nimport math\nimport torch\nfrom torch import nn\n\nfrom .util.misc import NestedTensor\n\n\nclass PositionEmbeddingSine(nn.Module):\n    \"\"\"\n    This is a more standard version of the position embedding, very similar to the one\n    used by the Attention is all you need paper, generalized to work on images.\n    \"\"\"\n\n    def __init__(\n        self, num_pos_feats=64, temperature=10000, normalize=False, scale=None\n    ):\n        super().__init__()\n        self.num_pos_feats = num_pos_feats\n        self.temperature = temperature\n        self.normalize = normalize\n        if scale is not None and normalize is False:\n            raise ValueError(\"normalize should be True if scale is passed\")\n        if scale is None:\n            scale = 2 * math.pi\n        self.scale = scale\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        mask = tensor_list.mask\n        assert mask is not None\n        not_mask = ~mask\n        y_embed = not_mask.cumsum(1, dtype=torch.float32)\n        x_embed = not_mask.cumsum(2, dtype=torch.float32)\n        if self.normalize:\n            eps = 1e-6\n            y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale\n            x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale\n\n        dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)\n        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)\n\n        pos_x = x_embed[:, :, :, None] / dim_t\n        pos_y = y_embed[:, :, :, None] / dim_t\n        pos_x = torch.stack(\n            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4\n        ).flatten(3)\n        pos_y = torch.stack(\n            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4\n        ).flatten(3)\n        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n        return pos\n\n\nclass PositionEmbeddingLearned(nn.Module):\n    \"\"\"\n    Absolute pos embedding, learned.\n    \"\"\"\n\n    def __init__(self, num_pos_feats=256):\n        super().__init__()\n        self.row_embed = nn.Embedding(50, num_pos_feats)\n        self.col_embed = nn.Embedding(50, num_pos_feats)\n        self.reset_parameters()\n\n    def reset_parameters(self):\n        nn.init.uniform_(self.row_embed.weight)\n        nn.init.uniform_(self.col_embed.weight)\n\n    def forward(self, tensor_list: NestedTensor):\n        x = tensor_list.tensors\n        h, w = x.shape[-2:]\n        i = torch.arange(w, device=x.device)\n        j = torch.arange(h, device=x.device)\n        x_emb = self.col_embed(i)\n        y_emb = self.row_embed(j)\n        pos = (\n            torch.cat(\n                [\n                    x_emb.unsqueeze(0).repeat(h, 1, 1),\n                    y_emb.unsqueeze(1).repeat(1, w, 1),\n                ],\n                dim=-1,\n            )\n            .permute(2, 0, 1)\n            
.unsqueeze(0)\n            .repeat(x.shape[0], 1, 1, 1)\n        )\n        return pos\n\n\ndef build_position_encoding(args):\n    N_steps = args.hidden_dim // 2\n    if args.position_embedding in (\"v2\", \"sine\"):\n        # TODO find a better way of exposing other arguments\n        position_embedding = PositionEmbeddingSine(N_steps, normalize=True)\n    elif args.position_embedding in (\"v3\", \"learned\"):\n        position_embedding = PositionEmbeddingLearned(N_steps)\n    else:\n        raise ValueError(f\"not supported {args.position_embedding}\")\n\n    return position_embedding\n"
  },
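  {
    "path": "projects/instance_segment_anything/models/hdetr/models/example_position_encoding_sketch.py",
    "content": "# NOTE: illustrative sketch only -- this file is not part of the original H-Deformable-DETR\n# release. It reproduces the PositionEmbeddingSine arithmetic for a single unpadded feature map\n# without NestedTensor, to make the normalization and the sin/cos interleaving easy to inspect.\nimport math\n\nimport torch\n\nif __name__ == \"__main__\":\n    num_pos_feats, temperature, scale, eps = 128, 10000, 2 * math.pi, 1e-6\n    h, w = 4, 6  # toy feature-map size, no padding (mask is all False)\n\n    not_mask = torch.ones(1, h, w)\n    y_embed = not_mask.cumsum(1, dtype=torch.float32)\n    x_embed = not_mask.cumsum(2, dtype=torch.float32)\n    y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * scale\n    x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * scale\n\n    dim_t = torch.arange(num_pos_feats, dtype=torch.float32)\n    dim_t = temperature ** (2 * (dim_t // 2) / num_pos_feats)\n\n    pos_x = x_embed[:, :, :, None] / dim_t\n    pos_y = y_embed[:, :, :, None] / dim_t\n    pos_x = torch.stack((pos_x[..., 0::2].sin(), pos_x[..., 1::2].cos()), dim=4).flatten(3)\n    pos_y = torch.stack((pos_y[..., 0::2].sin(), pos_y[..., 1::2].cos()), dim=4).flatten(3)\n    pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n    print(pos.shape)  # torch.Size([1, 256, 4, 6]); 2 * num_pos_feats channels\n"
  },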
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/segmentation.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nThis file provides the definition of the convolutional heads used to predict masks, as well as the losses\n\"\"\"\nimport io\nfrom collections import defaultdict\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom PIL import Image\n\nfrom .util import box_ops\nfrom .util.misc import NestedTensor, interpolate, nested_tensor_from_tensor_list\n\ntry:\n    from panopticapi.utils import id2rgb, rgb2id\nexcept ImportError:\n    pass\n\n\nclass DETRsegm(nn.Module):\n    def __init__(self, detr, freeze_detr=False):\n        super().__init__()\n        self.detr = detr\n\n        if freeze_detr:\n            for p in self.parameters():\n                p.requires_grad_(False)\n\n        hidden_dim, nheads = detr.transformer.d_model, detr.transformer.nhead\n        self.bbox_attention = MHAttentionMap(hidden_dim, hidden_dim, nheads, dropout=0)\n        self.mask_head = MaskHeadSmallConv(\n            hidden_dim + nheads, [1024, 512, 256], hidden_dim\n        )\n\n    def forward(self, samples: NestedTensor):\n        if not isinstance(samples, NestedTensor):\n            samples = nested_tensor_from_tensor_list(samples)\n        features, pos = self.detr.backbone(samples)\n\n        bs = features[-1].tensors.shape[0]\n\n        src, mask = features[-1].decompose()\n        src_proj = self.detr.input_proj(src)\n        hs, memory = self.detr.transformer(\n            src_proj, mask, self.detr.query_embed.weight, pos[-1]\n        )\n\n        outputs_class = self.detr.class_embed(hs)\n        outputs_coord = self.detr.bbox_embed(hs).sigmoid()\n        out = {\"pred_logits\": outputs_class[-1], \"pred_boxes\": outputs_coord[-1]}\n        if self.detr.aux_loss:\n            out[\"aux_outputs\"] = [\n                {\"pred_logits\": a, \"pred_boxes\": b}\n                for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n            ]\n\n        # FIXME h_boxes takes the last one computed, keep this in mind\n        bbox_mask = self.bbox_attention(hs[-1], memory, mask=mask)\n\n        seg_masks = self.mask_head(\n            src_proj,\n            bbox_mask,\n            [features[2].tensors, features[1].tensors, features[0].tensors],\n        )\n        outputs_seg_masks = seg_masks.view(\n            bs, self.detr.num_queries, seg_masks.shape[-2], seg_masks.shape[-1]\n        )\n\n        out[\"pred_masks\"] = outputs_seg_masks\n        return out\n\n\nclass MaskHeadSmallConv(nn.Module):\n    \"\"\"\n    Simple convolutional head, using group norm.\n    Upsampling is done using a FPN approach\n    \"\"\"\n\n    def __init__(self, dim, fpn_dims, context_dim):\n        super().__init__()\n\n        inter_dims = [\n            dim,\n            context_dim // 2,\n            context_dim // 4,\n            context_dim // 8,\n            context_dim // 16,\n            context_dim // 64,\n        ]\n        self.lay1 = torch.nn.Conv2d(dim, dim, 3, padding=1)\n        self.gn1 = torch.nn.GroupNorm(8, dim)\n        self.lay2 = torch.nn.Conv2d(dim, 
inter_dims[1], 3, padding=1)\n        self.gn2 = torch.nn.GroupNorm(8, inter_dims[1])\n        self.lay3 = torch.nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)\n        self.gn3 = torch.nn.GroupNorm(8, inter_dims[2])\n        self.lay4 = torch.nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)\n        self.gn4 = torch.nn.GroupNorm(8, inter_dims[3])\n        self.lay5 = torch.nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)\n        self.gn5 = torch.nn.GroupNorm(8, inter_dims[4])\n        self.out_lay = torch.nn.Conv2d(inter_dims[4], 1, 3, padding=1)\n\n        self.dim = dim\n\n        self.adapter1 = torch.nn.Conv2d(fpn_dims[0], inter_dims[1], 1)\n        self.adapter2 = torch.nn.Conv2d(fpn_dims[1], inter_dims[2], 1)\n        self.adapter3 = torch.nn.Conv2d(fpn_dims[2], inter_dims[3], 1)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                nn.init.kaiming_uniform_(m.weight, a=1)\n                nn.init.constant_(m.bias, 0)\n\n    def forward(self, x, bbox_mask, fpns):\n        def expand(tensor, length):\n            return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1)\n\n        x = torch.cat([expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)\n\n        x = self.lay1(x)\n        x = self.gn1(x)\n        x = F.relu(x)\n        x = self.lay2(x)\n        x = self.gn2(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter1(fpns[0])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay3(x)\n        x = self.gn3(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter2(fpns[1])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay4(x)\n        x = self.gn4(x)\n        x = F.relu(x)\n\n        cur_fpn = self.adapter3(fpns[2])\n        if cur_fpn.size(0) != x.size(0):\n            cur_fpn = expand(cur_fpn, x.size(0) / cur_fpn.size(0))\n        x = cur_fpn + F.interpolate(x, size=cur_fpn.shape[-2:], mode=\"nearest\")\n        x = self.lay5(x)\n        x = self.gn5(x)\n        x = F.relu(x)\n\n        x = self.out_lay(x)\n        return x\n\n\nclass MHAttentionMap(nn.Module):\n    \"\"\"This is a 2D attention module, which only returns the attention softmax (no multiplication by value)\"\"\"\n\n    def __init__(self, query_dim, hidden_dim, num_heads, dropout=0, bias=True):\n        super().__init__()\n        self.num_heads = num_heads\n        self.hidden_dim = hidden_dim\n        self.dropout = nn.Dropout(dropout)\n\n        self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias)\n        self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias)\n\n        nn.init.zeros_(self.k_linear.bias)\n        nn.init.zeros_(self.q_linear.bias)\n        nn.init.xavier_uniform_(self.k_linear.weight)\n        nn.init.xavier_uniform_(self.q_linear.weight)\n        self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5\n\n    def forward(self, q, k, mask=None):\n        q = self.q_linear(q)\n        k = F.conv2d(\n            k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias\n        )\n        qh = q.view(\n            q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads\n        )\n        kh = k.view(\n            k.shape[0],\n            
self.num_heads,\n            self.hidden_dim // self.num_heads,\n            k.shape[-2],\n            k.shape[-1],\n        )\n        weights = torch.einsum(\"bqnc,bnchw->bqnhw\", qh * self.normalize_fact, kh)\n\n        if mask is not None:\n            weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), float(\"-inf\"))\n        weights = F.softmax(weights.flatten(2), dim=-1).view_as(weights)\n        weights = self.dropout(weights)\n        return weights\n\n\ndef dice_loss(inputs, targets, num_boxes):\n    \"\"\"\n    Compute the DICE loss, similar to generalized IOU for masks\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n    \"\"\"\n    inputs = inputs.sigmoid()\n    inputs = inputs.flatten(1)\n    numerator = 2 * (inputs * targets).sum(1)\n    denominator = inputs.sum(-1) + targets.sum(-1)\n    loss = 1 - (numerator + 1) / (denominator + 1)\n    return loss.sum() / num_boxes\n\n\ndef sigmoid_focal_loss(\n    inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2\n):\n    \"\"\"\n    Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002.\n    Args:\n        inputs: A float tensor of arbitrary shape.\n                The predictions for each example.\n        targets: A float tensor with the same shape as inputs. Stores the binary\n                 classification label for each element in inputs\n                (0 for the negative class and 1 for the positive class).\n        alpha: (optional) Weighting factor in range (0,1) to balance\n                positive vs negative examples. 
Default = 0.25; a negative value disables the weighting.\n        gamma: Exponent of the modulating factor (1 - p_t) to\n               balance easy vs hard examples.\n    Returns:\n        Loss tensor\n    \"\"\"\n    prob = inputs.sigmoid()\n    ce_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduction=\"none\")\n    p_t = prob * targets + (1 - prob) * (1 - targets)\n    loss = ce_loss * ((1 - p_t) ** gamma)\n\n    if alpha >= 0:\n        alpha_t = alpha * targets + (1 - alpha) * (1 - targets)\n        loss = alpha_t * loss\n\n    return loss.mean(1).sum() / num_boxes\n\n\nclass PostProcessSegm(nn.Module):\n    def __init__(self, threshold=0.5):\n        super().__init__()\n        self.threshold = threshold\n\n    @torch.no_grad()\n    def forward(self, results, outputs, orig_target_sizes, max_target_sizes):\n        assert len(orig_target_sizes) == len(max_target_sizes)\n        max_h, max_w = max_target_sizes.max(0)[0].tolist()\n        outputs_masks = outputs[\"pred_masks\"].squeeze(2)\n        outputs_masks = F.interpolate(\n            outputs_masks, size=(max_h, max_w), mode=\"bilinear\", align_corners=False\n        )\n        outputs_masks = (outputs_masks.sigmoid() > self.threshold).cpu()\n\n        for i, (cur_mask, t, tt) in enumerate(\n            zip(outputs_masks, max_target_sizes, orig_target_sizes)\n        ):\n            img_h, img_w = t[0], t[1]\n            results[i][\"masks\"] = cur_mask[:, :img_h, :img_w].unsqueeze(1)\n            results[i][\"masks\"] = F.interpolate(\n                results[i][\"masks\"].float(), size=tuple(tt.tolist()), mode=\"nearest\"\n            ).byte()\n\n        return results\n\n\nclass PostProcessPanoptic(nn.Module):\n    \"\"\"This class converts the output of the model to the final panoptic result, in the format expected by the\n    coco panoptic API \"\"\"\n\n    def __init__(self, is_thing_map, threshold=0.85):\n        \"\"\"\n        Parameters:\n           is_thing_map: This is a dict whose keys are the class ids, and the values a boolean indicating whether\n                          the class is a thing (True) or a stuff (False) class\n           threshold: confidence threshold: segments with confidence lower than this will be deleted\n        \"\"\"\n        super().__init__()\n        self.threshold = threshold\n        self.is_thing_map = is_thing_map\n\n    def forward(self, outputs, processed_sizes, target_sizes=None):\n        \"\"\" This function computes the panoptic prediction from the model's predictions.\n        Parameters:\n            outputs: This is a dict coming directly from the model. See the model doc for the content.\n            processed_sizes: This is a list of tuples (or torch tensors) of sizes of the images that were passed to the\n                             model, ie the size after data augmentation but before batching.\n            target_sizes: This is a list of tuples (or torch tensors) corresponding to the requested final size\n                          of each prediction. 
If left to None, it will default to the processed_sizes\n            \"\"\"\n        if target_sizes is None:\n            target_sizes = processed_sizes\n        assert len(processed_sizes) == len(target_sizes)\n        out_logits, raw_masks, raw_boxes = (\n            outputs[\"pred_logits\"],\n            outputs[\"pred_masks\"],\n            outputs[\"pred_boxes\"],\n        )\n        assert len(out_logits) == len(raw_masks) == len(target_sizes)\n        preds = []\n\n        def to_tuple(tup):\n            if isinstance(tup, tuple):\n                return tup\n            return tuple(tup.cpu().tolist())\n\n        for cur_logits, cur_masks, cur_boxes, size, target_size in zip(\n            out_logits, raw_masks, raw_boxes, processed_sizes, target_sizes\n        ):\n            # we filter empty queries and detection below threshold\n            scores, labels = cur_logits.softmax(-1).max(-1)\n            keep = labels.ne(outputs[\"pred_logits\"].shape[-1] - 1) & (\n                scores > self.threshold\n            )\n            cur_scores, cur_classes = cur_logits.softmax(-1).max(-1)\n            cur_scores = cur_scores[keep]\n            cur_classes = cur_classes[keep]\n            cur_masks = cur_masks[keep]\n            cur_masks = interpolate(\n                cur_masks[None], to_tuple(size), mode=\"bilinear\"\n            ).squeeze(0)\n            cur_boxes = box_ops.box_cxcywh_to_xyxy(cur_boxes[keep])\n\n            h, w = cur_masks.shape[-2:]\n            assert len(cur_boxes) == len(cur_classes)\n\n            # It may be that we have several predicted masks for the same stuff class.\n            # In the following, we track the list of masks ids for each stuff class (they are merged later on)\n            cur_masks = cur_masks.flatten(1)\n            stuff_equiv_classes = defaultdict(lambda: [])\n            for k, label in enumerate(cur_classes):\n                if not self.is_thing_map[label.item()]:\n                    stuff_equiv_classes[label.item()].append(k)\n\n            def get_ids_area(masks, scores, dedup=False):\n                # This helper function creates the final panoptic segmentation image\n                # It also returns the area of the masks that appears on the image\n\n                m_id = masks.transpose(0, 1).softmax(-1)\n\n                if m_id.shape[-1] == 0:\n                    # We didn't detect any mask :(\n                    m_id = torch.zeros((h, w), dtype=torch.long, device=m_id.device)\n                else:\n                    m_id = m_id.argmax(-1).view(h, w)\n\n                if dedup:\n                    # Merge the masks corresponding to the same stuff class\n                    for equiv in stuff_equiv_classes.values():\n                        if len(equiv) > 1:\n                            for eq_id in equiv:\n                                m_id.masked_fill_(m_id.eq(eq_id), equiv[0])\n\n                final_h, final_w = to_tuple(target_size)\n\n                seg_img = Image.fromarray(id2rgb(m_id.view(h, w).cpu().numpy()))\n                seg_img = seg_img.resize(\n                    size=(final_w, final_h), resample=Image.NEAREST\n                )\n\n                np_seg_img = (\n                    torch.ByteTensor(torch.ByteStorage.from_buffer(seg_img.tobytes()))\n                    .view(final_h, final_w, 3)\n                    .numpy()\n                )\n                m_id = torch.from_numpy(rgb2id(np_seg_img))\n\n                area = []\n                for i in range(len(scores)):\n            
        area.append(m_id.eq(i).sum().item())\n                return area, seg_img\n\n            area, seg_img = get_ids_area(cur_masks, cur_scores, dedup=True)\n            if cur_classes.numel() > 0:\n                # We now filter empty masks as long as we find some\n                while True:\n                    filtered_small = torch.as_tensor(\n                        [area[i] <= 4 for i, c in enumerate(cur_classes)],\n                        dtype=torch.bool,\n                        device=keep.device,\n                    )\n                    if filtered_small.any().item():\n                        cur_scores = cur_scores[~filtered_small]\n                        cur_classes = cur_classes[~filtered_small]\n                        cur_masks = cur_masks[~filtered_small]\n                        area, seg_img = get_ids_area(cur_masks, cur_scores)\n                    else:\n                        break\n\n            else:\n                cur_classes = torch.ones(1, dtype=torch.long, device=cur_classes.device)\n\n            segments_info = []\n            for i, a in enumerate(area):\n                cat = cur_classes[i].item()\n                segments_info.append(\n                    {\n                        \"id\": i,\n                        \"isthing\": self.is_thing_map[cat],\n                        \"category_id\": cat,\n                        \"area\": a,\n                    }\n                )\n            del cur_classes\n\n            with io.BytesIO() as out:\n                seg_img.save(out, format=\"PNG\")\n                predictions = {\n                    \"png_string\": out.getvalue(),\n                    \"segments_info\": segments_info,\n                }\n            preds.append(predictions)\n        return preds\n"
  },
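  {
    "path": "projects/instance_segment_anything/models/hdetr/models/example_mask_losses.py",
    "content": "# NOTE: hypothetical sanity check -- this file is not part of the original H-Deformable-DETR\n# release. It calls the dice_loss and sigmoid_focal_loss defined in segmentation.py on toy\n# flattened mask logits, assuming the hdetr models package is importable from the repository root.\nimport torch\n\nfrom projects.instance_segment_anything.models.hdetr.models.segmentation import (\n    dice_loss,\n    sigmoid_focal_loss,\n)\n\nif __name__ == \"__main__\":\n    num_boxes = 4  # number of matched query/target pairs\n    logits = torch.randn(num_boxes, 32 * 32)  # raw mask logits, flattened per matched query\n    targets = (torch.rand(num_boxes, 32 * 32) > 0.5).float()  # binary ground-truth masks\n\n    print(\"dice loss:\", dice_loss(logits, targets, num_boxes).item())\n    print(\"focal loss:\", sigmoid_focal_loss(logits, targets, num_boxes).item())\n"
  },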
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/swin_transformer.py",
    "content": "# --------------------------------------------------------\n# Swin Transformer\n# Copyright (c) 2021 Microsoft\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Ze Liu, Yutong Lin, Yixuan Wei\n# --------------------------------------------------------\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.utils.checkpoint as checkpoint\nimport numpy as np\nfrom timm.models.layers import DropPath, to_2tuple, trunc_normal_\n\nfrom mmdet.utils import get_root_logger\n\n\nclass Mlp(nn.Module):\n    \"\"\" Multilayer perceptron.\"\"\"\n\n    def __init__(\n        self,\n        in_features,\n        hidden_features=None,\n        out_features=None,\n        act_layer=nn.GELU,\n        drop=0.0,\n    ):\n        super().__init__()\n        out_features = out_features or in_features\n        hidden_features = hidden_features or in_features\n        self.fc1 = nn.Linear(in_features, hidden_features)\n        self.act = act_layer()\n        self.fc2 = nn.Linear(hidden_features, out_features)\n        self.drop = nn.Dropout(drop)\n\n    def forward(self, x):\n        x = self.fc1(x)\n        x = self.act(x)\n        x = self.drop(x)\n        x = self.fc2(x)\n        x = self.drop(x)\n        return x\n\n\ndef window_partition(x, window_size):\n    \"\"\"\n    Args:\n        x: (B, H, W, C)\n        window_size (int): window size\n\n    Returns:\n        windows: (num_windows*B, window_size, window_size, C)\n    \"\"\"\n    B, H, W, C = x.shape\n    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)\n    windows = (\n        x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    )\n    return windows\n\n\ndef window_reverse(windows, window_size, H, W):\n    \"\"\"\n    Args:\n        windows: (num_windows*B, window_size, window_size, C)\n        window_size (int): Window size\n        H (int): Height of image\n        W (int): Width of image\n\n    Returns:\n        x: (B, H, W, C)\n    \"\"\"\n    B = int(windows.shape[0] / (H * W / window_size / window_size))\n    x = windows.view(\n        B, H // window_size, W // window_size, window_size, window_size, -1\n    )\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)\n    return x\n\n\nclass WindowAttention(nn.Module):\n    \"\"\" Window based multi-head self attention (W-MSA) module with relative position bias.\n    It supports both of shifted and non-shifted window.\n\n    Args:\n        dim (int): Number of input channels.\n        window_size (tuple[int]): The height and width of the window.\n        num_heads (int): Number of attention heads.\n        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set\n        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0\n        proj_drop (float, optional): Dropout ratio of output. 
Default: 0.0\n    \"\"\"\n\n    def __init__(\n        self,\n        dim,\n        window_size,\n        num_heads,\n        qkv_bias=True,\n        qk_scale=None,\n        attn_drop=0.0,\n        proj_drop=0.0,\n    ):\n\n        super().__init__()\n        self.dim = dim\n        self.window_size = window_size  # Wh, Ww\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = qk_scale or head_dim ** -0.5\n\n        # define a parameter table of relative position bias\n        self.relative_position_bias_table = nn.Parameter(\n            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)\n        )  # 2*Wh-1 * 2*Ww-1, nH\n\n        # get pair-wise relative position index for each token inside the window\n        coords_h = torch.arange(self.window_size[0])\n        coords_w = torch.arange(self.window_size[1])\n        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww\n        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww\n        relative_coords = (\n            coords_flatten[:, :, None] - coords_flatten[:, None, :]\n        )  # 2, Wh*Ww, Wh*Ww\n        relative_coords = relative_coords.permute(\n            1, 2, 0\n        ).contiguous()  # Wh*Ww, Wh*Ww, 2\n        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0\n        relative_coords[:, :, 1] += self.window_size[1] - 1\n        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1\n        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww\n        self.register_buffer(\"relative_position_index\", relative_position_index)\n\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n        self.attn_drop = nn.Dropout(attn_drop)\n        self.proj = nn.Linear(dim, dim)\n        self.proj_drop = nn.Dropout(proj_drop)\n\n        trunc_normal_(self.relative_position_bias_table, std=0.02)\n        self.softmax = nn.Softmax(dim=-1)\n\n    def forward(self, x, mask=None):\n        \"\"\" Forward function.\n\n        Args:\n            x: input features with shape of (num_windows*B, N, C)\n            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None\n        \"\"\"\n        B_, N, C = x.shape\n        qkv = (\n            self.qkv(x)\n            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)\n            .permute(2, 0, 3, 1, 4)\n        )\n        q, k, v = (\n            qkv[0],\n            qkv[1],\n            qkv[2],\n        )  # make torchscript happy (cannot use tensor as tuple)\n\n        q = q * self.scale\n        attn = q @ k.transpose(-2, -1)\n\n        relative_position_bias = self.relative_position_bias_table[\n            self.relative_position_index.view(-1)\n        ].view(\n            self.window_size[0] * self.window_size[1],\n            self.window_size[0] * self.window_size[1],\n            -1,\n        )  # Wh*Ww,Wh*Ww,nH\n        relative_position_bias = relative_position_bias.permute(\n            2, 0, 1\n        ).contiguous()  # nH, Wh*Ww, Wh*Ww\n        attn = attn + relative_position_bias.unsqueeze(0)\n\n        if mask is not None:\n            nW = mask.shape[0]\n            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(\n                1\n            ).unsqueeze(0)\n            attn = attn.view(-1, self.num_heads, N, N)\n            attn = self.softmax(attn)\n        else:\n            attn = self.softmax(attn)\n\n        attn = self.attn_drop(attn)\n\n        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)\n   
     x = self.proj(x)\n        x = self.proj_drop(x)\n        return x\n\n\nclass SwinTransformerBlock(nn.Module):\n    \"\"\" Swin Transformer Block.\n\n    Args:\n        dim (int): Number of input channels.\n        num_heads (int): Number of attention heads.\n        window_size (int): Window size.\n        shift_size (int): Shift size for SW-MSA.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n        drop (float, optional): Dropout rate. Default: 0.0\n        attn_drop (float, optional): Attention dropout rate. Default: 0.0\n        drop_path (float, optional): Stochastic depth rate. Default: 0.0\n        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU\n        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm\n    \"\"\"\n\n    def __init__(\n        self,\n        dim,\n        num_heads,\n        window_size=7,\n        shift_size=0,\n        mlp_ratio=4.0,\n        qkv_bias=True,\n        qk_scale=None,\n        drop=0.0,\n        attn_drop=0.0,\n        drop_path=0.0,\n        act_layer=nn.GELU,\n        norm_layer=nn.LayerNorm,\n    ):\n        super().__init__()\n        self.dim = dim\n        self.num_heads = num_heads\n        self.window_size = window_size\n        self.shift_size = shift_size\n        self.mlp_ratio = mlp_ratio\n        assert (\n            0 <= self.shift_size < self.window_size\n        ), \"shift_size must in 0-window_size\"\n\n        self.norm1 = norm_layer(dim)\n        self.attn = WindowAttention(\n            dim,\n            window_size=to_2tuple(self.window_size),\n            num_heads=num_heads,\n            qkv_bias=qkv_bias,\n            qk_scale=qk_scale,\n            attn_drop=attn_drop,\n            proj_drop=drop,\n        )\n\n        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()\n        self.norm2 = norm_layer(dim)\n        mlp_hidden_dim = int(dim * mlp_ratio)\n        self.mlp = Mlp(\n            in_features=dim,\n            hidden_features=mlp_hidden_dim,\n            act_layer=act_layer,\n            drop=drop,\n        )\n\n        self.H = None\n        self.W = None\n\n    def forward(self, x, mask_matrix):\n        \"\"\" Forward function.\n\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n            mask_matrix: Attention mask for cyclic shift.\n        \"\"\"\n        B, L, C = x.shape\n        H, W = self.H, self.W\n        assert L == H * W, \"input feature has wrong size\"\n\n        shortcut = x\n        x = self.norm1(x)\n        x = x.view(B, H, W, C)\n\n        # pad feature maps to multiples of window size\n        pad_l = pad_t = 0\n        pad_r = (self.window_size - W % self.window_size) % self.window_size\n        pad_b = (self.window_size - H % self.window_size) % self.window_size\n        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))\n        _, Hp, Wp, _ = x.shape\n\n        # cyclic shift\n        if self.shift_size > 0:\n            shifted_x = torch.roll(\n                x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)\n            )\n            attn_mask = mask_matrix\n        else:\n            shifted_x = x\n            attn_mask = None\n\n        # partition windows\n        x_windows = window_partition(\n   
         shifted_x, self.window_size\n        )  # nW*B, window_size, window_size, C\n        x_windows = x_windows.view(\n            -1, self.window_size * self.window_size, C\n        )  # nW*B, window_size*window_size, C\n\n        # W-MSA/SW-MSA\n        attn_windows = self.attn(\n            x_windows, mask=attn_mask\n        )  # nW*B, window_size*window_size, C\n\n        # merge windows\n        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)\n        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C\n\n        # reverse cyclic shift\n        if self.shift_size > 0:\n            x = torch.roll(\n                shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)\n            )\n        else:\n            x = shifted_x\n\n        if pad_r > 0 or pad_b > 0:\n            x = x[:, :H, :W, :].contiguous()\n\n        x = x.view(B, H * W, C)\n\n        # FFN\n        x = shortcut + self.drop_path(x)\n        x = x + self.drop_path(self.mlp(self.norm2(x)))\n\n        return x\n\n\nclass PatchMerging(nn.Module):\n    \"\"\" Patch Merging Layer\n\n    Args:\n        dim (int): Number of input channels.\n        norm_layer (nn.Module, optional): Normalization layer.  Default: nn.LayerNorm\n    \"\"\"\n\n    def __init__(self, dim, norm_layer=nn.LayerNorm):\n        super().__init__()\n        self.dim = dim\n        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)\n        self.norm = norm_layer(4 * dim)\n\n    def forward(self, x, H, W):\n        \"\"\" Forward function.\n\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n        B, L, C = x.shape\n        assert L == H * W, \"input feature has wrong size\"\n\n        x = x.view(B, H, W, C)\n\n        # padding\n        pad_input = (H % 2 == 1) or (W % 2 == 1)\n        if pad_input:\n            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))\n\n        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C\n        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C\n        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C\n        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C\n        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C\n        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C\n\n        x = self.norm(x)\n        x = self.reduction(x)\n\n        return x\n\n\nclass BasicLayer(nn.Module):\n    \"\"\" A basic Swin Transformer layer for one stage.\n\n    Args:\n        dim (int): Number of feature channels\n        depth (int): Depths of this stage.\n        num_heads (int): Number of attention head.\n        window_size (int): Local window size. Default: 7.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.\n        drop (float, optional): Dropout rate. Default: 0.0\n        attn_drop (float, optional): Attention dropout rate. Default: 0.0\n        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0\n        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm\n        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n    \"\"\"\n\n    def __init__(\n        self,\n        dim,\n        depth,\n        num_heads,\n        window_size=7,\n        mlp_ratio=4.0,\n        qkv_bias=True,\n        qk_scale=None,\n        drop=0.0,\n        attn_drop=0.0,\n        drop_path=0.0,\n        norm_layer=nn.LayerNorm,\n        downsample=None,\n        use_checkpoint=False,\n    ):\n        super().__init__()\n        self.window_size = window_size\n        self.shift_size = window_size // 2\n        self.depth = depth\n        self.use_checkpoint = use_checkpoint\n\n        # build blocks\n        self.blocks = nn.ModuleList(\n            [\n                SwinTransformerBlock(\n                    dim=dim,\n                    num_heads=num_heads,\n                    window_size=window_size,\n                    shift_size=0 if (i % 2 == 0) else window_size // 2,\n                    mlp_ratio=mlp_ratio,\n                    qkv_bias=qkv_bias,\n                    qk_scale=qk_scale,\n                    drop=drop,\n                    attn_drop=attn_drop,\n                    drop_path=drop_path[i]\n                    if isinstance(drop_path, list)\n                    else drop_path,\n                    norm_layer=norm_layer,\n                )\n                for i in range(depth)\n            ]\n        )\n\n        # patch merging layer\n        if downsample is not None:\n            self.downsample = downsample(dim=dim, norm_layer=norm_layer)\n        else:\n            self.downsample = None\n\n    def forward(self, x, H, W):\n        \"\"\" Forward function.\n\n        Args:\n            x: Input feature, tensor size (B, H*W, C).\n            H, W: Spatial resolution of the input feature.\n        \"\"\"\n\n        # calculate attention mask for SW-MSA\n        Hp = int(np.ceil(H / self.window_size)) * self.window_size\n        Wp = int(np.ceil(W / self.window_size)) * self.window_size\n        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1\n        h_slices = (\n            slice(0, -self.window_size),\n            slice(-self.window_size, -self.shift_size),\n            slice(-self.shift_size, None),\n        )\n        w_slices = (\n            slice(0, -self.window_size),\n            slice(-self.window_size, -self.shift_size),\n            slice(-self.shift_size, None),\n        )\n        cnt = 0\n        for h in h_slices:\n            for w in w_slices:\n                img_mask[:, h, w, :] = cnt\n                cnt += 1\n\n        mask_windows = window_partition(\n            img_mask, self.window_size\n        )  # nW, window_size, window_size, 1\n        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)\n        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)\n        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(\n            attn_mask == 0, float(0.0)\n        )\n\n        for blk in self.blocks:\n            blk.H, blk.W = H, W\n            if self.use_checkpoint:\n                x = checkpoint.checkpoint(blk, x, attn_mask)\n            else:\n                x = blk(x, attn_mask)\n        if self.downsample is not None:\n            x_down = self.downsample(x, H, W)\n            Wh, Ww = (H + 1) // 2, (W + 1) // 2\n            return x, H, W, x_down, Wh, Ww\n        else:\n            return x, H, W, x, H, W\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\" Image to Patch Embedding\n\n    Args:\n        patch_size (int): Patch token size. 
Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. Default: 96.\n        norm_layer (nn.Module, optional): Normalization layer. Default: None\n    \"\"\"\n\n    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):\n        super().__init__()\n        patch_size = to_2tuple(patch_size)\n        self.patch_size = patch_size\n\n        self.in_chans = in_chans\n        self.embed_dim = embed_dim\n\n        self.proj = nn.Conv2d(\n            in_chans, embed_dim, kernel_size=patch_size, stride=patch_size\n        )\n        if norm_layer is not None:\n            self.norm = norm_layer(embed_dim)\n        else:\n            self.norm = None\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        # padding\n        _, _, H, W = x.size()\n        if W % self.patch_size[1] != 0:\n            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))\n        if H % self.patch_size[0] != 0:\n            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))\n\n        x = self.proj(x)  # B C Wh Ww\n        if self.norm is not None:\n            Wh, Ww = x.size(2), x.size(3)\n            x = x.flatten(2).transpose(1, 2)\n            x = self.norm(x)\n            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)\n\n        return x\n\n\nclass SwinTransformer(nn.Module):\n    \"\"\" Swin Transformer backbone.\n        A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows`  -\n          https://arxiv.org/pdf/2103.14030\n\n    Args:\n        pretrain_img_size (int): Input image size for training the pretrained model,\n            used in absolute postion embedding. Default 224.\n        patch_size (int | tuple(int)): Patch size. Default: 4.\n        in_chans (int): Number of input image channels. Default: 3.\n        embed_dim (int): Number of linear projection output channels. Default: 96.\n        depths (tuple[int]): Depths of each Swin Transformer stage.\n        num_heads (tuple[int]): Number of attention head of each stage.\n        window_size (int): Window size. Default: 7.\n        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.\n        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True\n        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.\n        drop_rate (float): Dropout rate.\n        attn_drop_rate (float): Attention dropout rate. Default: 0.\n        drop_path_rate (float): Stochastic depth rate. Default: 0.2.\n        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.\n        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.\n        patch_norm (bool): If True, add normalization after patch embedding. Default: True.\n        out_indices (Sequence[int]): Output from which stages.\n        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).\n            -1 means not freezing any parameters.\n        use_checkpoint (bool): Whether to use checkpointing to save memory. 
Default: False.\n    \"\"\"\n\n    def __init__(\n        self,\n        pretrain_img_size=224,\n        patch_size=4,\n        in_chans=3,\n        embed_dim=96,\n        depths=[2, 2, 6, 2],\n        num_heads=[3, 6, 12, 24],\n        window_size=7,\n        mlp_ratio=4.0,\n        qkv_bias=True,\n        qk_scale=None,\n        drop_rate=0.0,\n        attn_drop_rate=0.0,\n        drop_path_rate=0.2,\n        norm_layer=nn.LayerNorm,\n        ape=False,\n        patch_norm=True,\n        out_indices=(0, 1, 2, 3),\n        frozen_stages=-1,\n        use_checkpoint=False,\n    ):\n        super().__init__()\n        self.drop_path_rate = drop_path_rate\n        self.pretrain_img_size = pretrain_img_size\n        self.num_layers = len(depths)\n        self.embed_dim = embed_dim\n        self.ape = ape\n        self.patch_norm = patch_norm\n        self.out_indices = out_indices\n        self.frozen_stages = frozen_stages\n\n        # split image into non-overlapping patches\n        self.patch_embed = PatchEmbed(\n            patch_size=patch_size,\n            in_chans=in_chans,\n            embed_dim=embed_dim,\n            norm_layer=norm_layer if self.patch_norm else None,\n        )\n\n        # absolute position embedding\n        if self.ape:\n            pretrain_img_size = to_2tuple(pretrain_img_size)\n            patch_size = to_2tuple(patch_size)\n            patches_resolution = [\n                pretrain_img_size[0] // patch_size[0],\n                pretrain_img_size[1] // patch_size[1],\n            ]\n\n            self.absolute_pos_embed = nn.Parameter(\n                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])\n            )\n            trunc_normal_(self.absolute_pos_embed, std=0.02)\n\n        self.pos_drop = nn.Dropout(p=drop_rate)\n\n        # stochastic depth\n        dpr = [\n            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))\n        ]  # stochastic depth decay rule\n\n        # build layers\n        self.layers = nn.ModuleList()\n        for i_layer in range(self.num_layers):\n            layer = BasicLayer(\n                dim=int(embed_dim * 2 ** i_layer),\n                depth=depths[i_layer],\n                num_heads=num_heads[i_layer],\n                window_size=window_size,\n                mlp_ratio=mlp_ratio,\n                qkv_bias=qkv_bias,\n                qk_scale=qk_scale,\n                drop=drop_rate,\n                attn_drop=attn_drop_rate,\n                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],\n                norm_layer=norm_layer,\n                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,\n                use_checkpoint=use_checkpoint,\n            )\n            self.layers.append(layer)\n\n        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]\n        self.num_features = num_features\n\n        # add a norm layer for each output\n        for i_layer in out_indices:\n            layer = norm_layer(num_features[i_layer])\n            layer_name = f\"norm{i_layer}\"\n            self.add_module(layer_name, layer)\n\n        self._freeze_stages()\n\n    def _freeze_stages(self):\n        if self.frozen_stages >= 0:\n            self.patch_embed.eval()\n            for param in self.patch_embed.parameters():\n                param.requires_grad = False\n\n        if self.frozen_stages >= 1 and self.ape:\n            self.absolute_pos_embed.requires_grad = False\n\n        if self.frozen_stages >= 
2:\n            self.pos_drop.eval()\n            for i in range(0, self.frozen_stages - 1):\n                m = self.layers[i]\n                m.eval()\n                for param in m.parameters():\n                    param.requires_grad = False\n\n    def init_weights(self, pretrained=None):\n        \"\"\"Initialize the weights in backbone.\n\n        Args:\n            pretrained (str, optional): Path to pre-trained weights.\n                Defaults to None.\n        \"\"\"\n\n        def _init_weights(m):\n            if isinstance(m, nn.Linear):\n                trunc_normal_(m.weight, std=0.02)\n                if isinstance(m, nn.Linear) and m.bias is not None:\n                    nn.init.constant_(m.bias, 0)\n            elif isinstance(m, nn.LayerNorm):\n                nn.init.constant_(m.bias, 0)\n                nn.init.constant_(m.weight, 1.0)\n\n        if isinstance(pretrained, str):\n            self.apply(_init_weights)\n            logger = get_root_logger()\n        elif pretrained is None:\n            self.apply(_init_weights)\n        else:\n            raise TypeError(\"pretrained must be a str or None\")\n\n    def forward(self, x):\n        \"\"\"Forward function.\"\"\"\n        x = self.patch_embed(x)\n\n        Wh, Ww = x.size(2), x.size(3)\n        if self.ape:\n            # interpolate the position embedding to the corresponding size\n            absolute_pos_embed = F.interpolate(\n                self.absolute_pos_embed, size=(Wh, Ww), mode=\"bicubic\"\n            )\n            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C\n        else:\n            x = x.flatten(2).transpose(1, 2)\n        x = self.pos_drop(x)\n\n        outs = {}\n        for i in range(self.num_layers):\n            layer = self.layers[i]\n            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)\n\n            if i in self.out_indices:\n                norm_layer = getattr(self, f\"norm{i}\")\n                x_out = norm_layer(x_out)\n\n                out = (\n                    x_out.view(-1, H, W, self.num_features[i])\n                    .permute(0, 3, 1, 2)\n                    .contiguous()\n                )\n                outs[str(i)] = out\n\n        return outs\n\n    def train(self, mode=True):\n        \"\"\"Convert the model into training mode while keep layers freezed.\"\"\"\n        super(SwinTransformer, self).train(mode)\n        self._freeze_stages()\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/util/__init__.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/util/box_ops.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nUtilities for bounding box manipulation and GIoU.\n\"\"\"\nimport torch\nfrom torchvision.ops.boxes import box_area\n\n\ndef box_cxcywh_to_xyxy(x):\n    x_c, y_c, w, h = x.unbind(-1)\n    b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)]\n    return torch.stack(b, dim=-1)\n\n\ndef box_xyxy_to_cxcywh(x):\n    x0, y0, x1, y1 = x.unbind(-1)\n    b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)]\n    return torch.stack(b, dim=-1)\n\n\n# modified from torchvision to also return the union\ndef box_iou(boxes1, boxes2):\n    area1 = box_area(boxes1)\n    area2 = box_area(boxes2)\n\n    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]\n    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]\n\n    wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]\n\n    union = area1[:, None] + area2 - inter\n\n    iou = inter / union\n    return iou, union\n\n\ndef generalized_box_iou(boxes1, boxes2):\n    \"\"\"\n    Generalized IoU from https://giou.stanford.edu/\n\n    The boxes should be in [x0, y0, x1, y1] format\n\n    Returns a [N, M] pairwise matrix, where N = len(boxes1)\n    and M = len(boxes2)\n    \"\"\"\n    # degenerate boxes gives inf / nan results\n    # so do an early check\n    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()\n    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()\n    iou, union = box_iou(boxes1, boxes2)\n\n    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])\n    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])\n\n    wh = (rb - lt).clamp(min=0)  # [N,M,2]\n    area = wh[:, :, 0] * wh[:, :, 1]\n\n    return iou - (area - union) / area\n\n\ndef masks_to_boxes(masks):\n    \"\"\"Compute the bounding boxes around the provided masks\n\n    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.\n\n    Returns a [N, 4] tensors, with the boxes in xyxy format\n    \"\"\"\n    if masks.numel() == 0:\n        return torch.zeros((0, 4), device=masks.device)\n\n    h, w = masks.shape[-2:]\n\n    y = torch.arange(0, h, dtype=torch.float)\n    x = torch.arange(0, w, dtype=torch.float)\n    y, x = torch.meshgrid(y, x)\n\n    x_mask = masks * x.unsqueeze(0)\n    x_max = x_mask.flatten(1).max(-1)[0]\n    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    y_mask = masks * y.unsqueeze(0)\n    y_max = y_mask.flatten(1).max(-1)[0]\n    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]\n\n    return torch.stack([x_min, y_min, x_max, y_max], 1)\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/util/misc.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nMisc functions, including distributed helpers.\n\nMostly copy-paste from torchvision references.\n\"\"\"\nimport os\nimport subprocess\nimport time\nfrom collections import defaultdict, deque\nimport datetime\nimport pickle\nfrom typing import Optional, List\n\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch import Tensor\n\n# needed due to empty tensor bug in pytorch and torchvision 0.5\nimport torchvision\n\n\nclass SmoothedValue(object):\n    \"\"\"Track a series of values and provide access to smoothed values over a\n    window or the global series average.\n    \"\"\"\n\n    def __init__(self, window_size=20, fmt=None):\n        if fmt is None:\n            fmt = \"{median:.4f} ({global_avg:.4f})\"\n        self.deque = deque(maxlen=window_size)\n        self.total = 0.0\n        self.count = 0\n        self.fmt = fmt\n\n    def update(self, value, n=1):\n        self.deque.append(value)\n        self.count += n\n        self.total += value * n\n\n    def synchronize_between_processes(self):\n        \"\"\"\n        Warning: does not synchronize the deque!\n        \"\"\"\n        if not is_dist_avail_and_initialized():\n            return\n        t = torch.tensor([self.count, self.total], dtype=torch.float64, device=\"cuda\")\n        dist.barrier()\n        dist.all_reduce(t)\n        t = t.tolist()\n        self.count = int(t[0])\n        self.total = t[1]\n\n    @property\n    def median(self):\n        d = torch.tensor(list(self.deque))\n        return d.median().item()\n\n    @property\n    def avg(self):\n        d = torch.tensor(list(self.deque), dtype=torch.float32)\n        return d.mean().item()\n\n    @property\n    def global_avg(self):\n        return self.total / self.count\n\n    @property\n    def max(self):\n        return max(self.deque)\n\n    @property\n    def value(self):\n        return self.deque[-1]\n\n    def __str__(self):\n        return self.fmt.format(\n            median=self.median,\n            avg=self.avg,\n            global_avg=self.global_avg,\n            max=self.max,\n            value=self.value,\n        )\n\n\ndef all_gather(data):\n    \"\"\"\n    Run all_gather on arbitrary picklable data (not necessarily tensors)\n    Args:\n        data: any picklable object\n    Returns:\n        list[data]: list of data gathered from each rank\n    \"\"\"\n    world_size = get_world_size()\n    if world_size == 1:\n        return [data]\n\n    # serialized to a Tensor\n    buffer = pickle.dumps(data)\n    storage = torch.ByteStorage.from_buffer(buffer)\n    tensor = torch.ByteTensor(storage).to(\"cuda\")\n\n    # obtain Tensor size of each rank\n    local_size = torch.tensor([tensor.numel()], device=\"cuda\")\n    size_list = 
[torch.tensor([0], device=\"cuda\") for _ in range(world_size)]\n    dist.all_gather(size_list, local_size)\n    size_list = [int(size.item()) for size in size_list]\n    max_size = max(size_list)\n\n    # receiving Tensor from all ranks\n    # we pad the tensor because torch all_gather does not support\n    # gathering tensors of different shapes\n    tensor_list = []\n    for _ in size_list:\n        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device=\"cuda\"))\n    if local_size != max_size:\n        padding = torch.empty(\n            size=(max_size - local_size,), dtype=torch.uint8, device=\"cuda\"\n        )\n        tensor = torch.cat((tensor, padding), dim=0)\n    dist.all_gather(tensor_list, tensor)\n\n    data_list = []\n    for size, tensor in zip(size_list, tensor_list):\n        buffer = tensor.cpu().numpy().tobytes()[:size]\n        data_list.append(pickle.loads(buffer))\n\n    return data_list\n\n\ndef reduce_dict(input_dict, average=True):\n    \"\"\"\n    Args:\n        input_dict (dict): all the values will be reduced\n        average (bool): whether to do average or sum\n    Reduce the values in the dictionary from all processes so that all processes\n    have the averaged results. Returns a dict with the same fields as\n    input_dict, after reduction.\n    \"\"\"\n    world_size = get_world_size()\n    if world_size < 2:\n        return input_dict\n    with torch.no_grad():\n        names = []\n        values = []\n        # sort the keys so that they are consistent across processes\n        for k in sorted(input_dict.keys()):\n            names.append(k)\n            values.append(input_dict[k])\n        values = torch.stack(values, dim=0)\n        dist.all_reduce(values)\n        if average:\n            values /= world_size\n        reduced_dict = {k: v for k, v in zip(names, values)}\n    return reduced_dict\n\n\nclass MetricLogger(object):\n    def __init__(self, delimiter=\"\\t\"):\n        self.meters = defaultdict(SmoothedValue)\n        self.delimiter = delimiter\n\n    def update(self, **kwargs):\n        for k, v in kwargs.items():\n            if isinstance(v, torch.Tensor):\n                v = v.item()\n            assert isinstance(v, (float, int))\n            self.meters[k].update(v)\n\n    def __getattr__(self, attr):\n        if attr in self.meters:\n            return self.meters[attr]\n        if attr in self.__dict__:\n            return self.__dict__[attr]\n        raise AttributeError(\n            \"'{}' object has no attribute '{}'\".format(type(self).__name__, attr)\n        )\n\n    def __str__(self):\n        loss_str = []\n        for name, meter in self.meters.items():\n            loss_str.append(\"{}: {}\".format(name, str(meter)))\n        return self.delimiter.join(loss_str)\n\n    def synchronize_between_processes(self):\n        for meter in self.meters.values():\n            meter.synchronize_between_processes()\n\n    def add_meter(self, name, meter):\n        self.meters[name] = meter\n\n    def log_every(self, iterable, print_freq, header=None):\n        i = 0\n        if not header:\n            header = \"\"\n        start_time = time.time()\n        end = time.time()\n        iter_time = SmoothedValue(fmt=\"{avg:.4f}\")\n        data_time = SmoothedValue(fmt=\"{avg:.4f}\")\n        space_fmt = \":\" + str(len(str(len(iterable)))) + \"d\"\n        if torch.cuda.is_available():\n            log_msg = self.delimiter.join(\n                [\n                    header,\n                    \"[{0\" + space_fmt 
+ \"}/{1}]\",\n                    \"eta: {eta}\",\n                    \"{meters}\",\n                    \"time: {time}\",\n                    \"data: {data}\",\n                    \"max mem: {memory:.0f}\",\n                ]\n            )\n        else:\n            log_msg = self.delimiter.join(\n                [\n                    header,\n                    \"[{0\" + space_fmt + \"}/{1}]\",\n                    \"eta: {eta}\",\n                    \"{meters}\",\n                    \"time: {time}\",\n                    \"data: {data}\",\n                ]\n            )\n        MB = 1024.0 * 1024.0\n        for obj in iterable:\n            data_time.update(time.time() - end)\n            yield obj\n            iter_time.update(time.time() - end)\n            if i % print_freq == 0 or i == len(iterable) - 1:\n                eta_seconds = iter_time.global_avg * (len(iterable) - i)\n                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n                if torch.cuda.is_available():\n                    print(\n                        log_msg.format(\n                            i,\n                            len(iterable),\n                            eta=eta_string,\n                            meters=str(self),\n                            time=str(iter_time),\n                            data=str(data_time),\n                            memory=torch.cuda.max_memory_allocated() / MB,\n                        )\n                    )\n                else:\n                    print(\n                        log_msg.format(\n                            i,\n                            len(iterable),\n                            eta=eta_string,\n                            meters=str(self),\n                            time=str(iter_time),\n                            data=str(data_time),\n                        )\n                    )\n            i += 1\n            end = time.time()\n        total_time = time.time() - start_time\n        total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n        print(\n            \"{} Total time: {} ({:.4f} s / it)\".format(\n                header, total_time_str, total_time / len(iterable)\n            )\n        )\n\n\ndef get_sha():\n    cwd = os.path.dirname(os.path.abspath(__file__))\n\n    def _run(command):\n        return subprocess.check_output(command, cwd=cwd).decode(\"ascii\").strip()\n\n    sha = \"N/A\"\n    diff = \"clean\"\n    branch = \"N/A\"\n    try:\n        sha = _run([\"git\", \"rev-parse\", \"HEAD\"])\n        subprocess.check_output([\"git\", \"diff\"], cwd=cwd)\n        diff = _run([\"git\", \"diff-index\", \"HEAD\"])\n        diff = \"has uncommited changes\" if diff else \"clean\"\n        branch = _run([\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"])\n    except Exception:\n        pass\n    message = f\"sha: {sha}, status: {diff}, branch: {branch}\"\n    return message\n\n\ndef collate_fn(batch):\n    batch = list(zip(*batch))\n    batch[0] = nested_tensor_from_tensor_list(batch[0])\n    return tuple(batch)\n\n\ndef _max_by_axis(the_list):\n    # type: (List[List[int]]) -> List[int]\n    maxes = the_list[0]\n    for sublist in the_list[1:]:\n        for index, item in enumerate(sublist):\n            maxes[index] = max(maxes[index], item)\n    return maxes\n\n\ndef nested_tensor_from_tensor_list(tensor_list: List[Tensor]):\n    # TODO make this more general\n    if tensor_list[0].ndim == 3:\n        # TODO make it support different-sized images\n        
max_size = _max_by_axis([list(img.shape) for img in tensor_list])\n        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))\n        batch_shape = [len(tensor_list)] + max_size\n        b, c, h, w = batch_shape\n        dtype = tensor_list[0].dtype\n        device = tensor_list[0].device\n        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)\n        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)\n        for img, pad_img, m in zip(tensor_list, tensor, mask):\n            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)\n            m[: img.shape[1], : img.shape[2]] = False\n    else:\n        raise ValueError(\"not supported\")\n    return NestedTensor(tensor, mask)\n\n\nclass NestedTensor(object):\n    def __init__(self, tensors, mask: Optional[Tensor]):\n        self.tensors = tensors\n        self.mask = mask\n\n    def to(self, device, non_blocking=False):\n        # type: (Device) -> NestedTensor # noqa\n        cast_tensor = self.tensors.to(device, non_blocking=non_blocking)\n        mask = self.mask\n        if mask is not None:\n            assert mask is not None\n            cast_mask = mask.to(device, non_blocking=non_blocking)\n        else:\n            cast_mask = None\n        return NestedTensor(cast_tensor, cast_mask)\n\n    def record_stream(self, *args, **kwargs):\n        self.tensors.record_stream(*args, **kwargs)\n        if self.mask is not None:\n            self.mask.record_stream(*args, **kwargs)\n\n    def decompose(self):\n        return self.tensors, self.mask\n\n    def __repr__(self):\n        return str(self.tensors)\n\n\ndef setup_for_distributed(is_master):\n    \"\"\"\n    This function disables printing when not in master process\n    \"\"\"\n    import builtins as __builtin__\n\n    builtin_print = __builtin__.print\n\n    def print(*args, **kwargs):\n        force = kwargs.pop(\"force\", False)\n        if is_master or force:\n            builtin_print(*args, **kwargs)\n\n    __builtin__.print = print\n\n\ndef is_dist_avail_and_initialized():\n    if not dist.is_available():\n        return False\n    if not dist.is_initialized():\n        return False\n    return True\n\n\ndef get_world_size():\n    if not is_dist_avail_and_initialized():\n        return 1\n    return dist.get_world_size()\n\n\ndef get_rank():\n    if not is_dist_avail_and_initialized():\n        return 0\n    return dist.get_rank()\n\n\ndef get_local_size():\n    if not is_dist_avail_and_initialized():\n        return 1\n    return int(os.environ[\"LOCAL_SIZE\"])\n\n\ndef get_local_rank():\n    if not is_dist_avail_and_initialized():\n        return 0\n    return int(os.environ[\"LOCAL_RANK\"])\n\n\ndef is_main_process():\n    return get_rank() == 0\n\n\ndef save_on_master(*args, **kwargs):\n    if is_main_process():\n        torch.save(*args, **kwargs)\n\n\ndef init_distributed_mode(args):\n    if \"RANK\" in os.environ and \"WORLD_SIZE\" in os.environ:\n        args.rank = int(os.environ[\"RANK\"])\n        args.world_size = int(os.environ[\"WORLD_SIZE\"])\n        args.gpu = int(os.environ[\"LOCAL_RANK\"])\n        args.dist_url = \"env://\"\n        os.environ[\"LOCAL_SIZE\"] = str(torch.cuda.device_count())\n    elif \"SLURM_PROCID\" in os.environ:\n        proc_id = int(os.environ[\"SLURM_PROCID\"])\n        ntasks = int(os.environ[\"SLURM_NTASKS\"])\n        node_list = os.environ[\"SLURM_NODELIST\"]\n        num_gpus = torch.cuda.device_count()\n        addr = subprocess.getoutput(\n            
\"scontrol show hostname {} | head -n1\".format(node_list)\n        )\n        os.environ[\"MASTER_PORT\"] = os.environ.get(\"MASTER_PORT\", \"29500\")\n        os.environ[\"MASTER_ADDR\"] = addr\n        os.environ[\"WORLD_SIZE\"] = str(ntasks)\n        os.environ[\"RANK\"] = str(proc_id)\n        os.environ[\"LOCAL_RANK\"] = str(proc_id % num_gpus)\n        os.environ[\"LOCAL_SIZE\"] = str(num_gpus)\n        args.dist_url = \"env://\"\n        args.world_size = ntasks\n        args.rank = proc_id\n        args.gpu = proc_id % num_gpus\n    else:\n        print(\"Not using distributed mode\")\n        args.distributed = False\n        return\n\n    args.distributed = True\n\n    torch.cuda.set_device(args.gpu)\n    args.dist_backend = \"nccl\"\n    print(\n        \"| distributed init (rank {}): {}\".format(args.rank, args.dist_url), flush=True\n    )\n    torch.distributed.init_process_group(\n        backend=args.dist_backend,\n        init_method=args.dist_url,\n        world_size=args.world_size,\n        rank=args.rank,\n    )\n    torch.distributed.barrier()\n    setup_for_distributed(args.rank == 0)\n\n\n@torch.no_grad()\ndef accuracy(output, target, topk=(1,)):\n    \"\"\"Computes the precision@k for the specified values of k\"\"\"\n    if target.numel() == 0:\n        return [torch.zeros([], device=output.device)]\n    maxk = max(topk)\n    batch_size = target.size(0)\n\n    _, pred = output.topk(maxk, 1, True, True)\n    pred = pred.t()\n    correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n    res = []\n    for k in topk:\n        correct_k = correct[:k].view(-1).float().sum(0)\n        res.append(correct_k.mul_(100.0 / batch_size))\n    return res\n\n\ndef interpolate(\n    input, size=None, scale_factor=None, mode=\"nearest\", align_corners=None\n):\n    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor\n    \"\"\"\n    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.\n    This will eventually be supported natively by PyTorch, and this\n    class can go away.\n    \"\"\"\n    return torchvision.ops.misc.interpolate(\n        input, size, scale_factor, mode, align_corners\n    )\n\n\ndef get_total_grad_norm(parameters, norm_type=2):\n    parameters = list(filter(lambda p: p.grad is not None, parameters))\n    norm_type = float(norm_type)\n    device = parameters[0].grad.device\n    total_norm = torch.norm(\n        torch.stack(\n            [torch.norm(p.grad.detach(), norm_type).to(device) for p in parameters]\n        ),\n        norm_type,\n    )\n    return total_norm\n\n\ndef inverse_sigmoid(x, eps=1e-5):\n    x = x.clamp(min=0, max=1)\n    x1 = x.clamp(min=eps)\n    x2 = (1 - x).clamp(min=eps)\n    return torch.log(x1 / x2)\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/hdetr/models/util/plot_utils.py",
    "content": "# ------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------\n# Modified from DETR (https://github.com/facebookresearch/detr)\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n# ------------------------------------------------------------------------\n\n\"\"\"\nPlotting utilities to visualize training logs.\n\"\"\"\nimport torch\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nfrom pathlib import Path, PurePath\n\n\ndef plot_logs(logs, fields=('class_error', 'loss_bbox_unscaled', 'mAP'), ewm_col=0, log_name='log.txt'):\n    '''\n    Function to plot specific fields from training log(s). Plots both training and test results.\n\n    :: Inputs - logs = list containing Path objects, each pointing to individual dir with a log file\n              - fields = which results to plot from each log file - plots both training and test for each field.\n              - ewm_col = optional, which column to use as the exponential weighted smoothing of the plots\n              - log_name = optional, name of log file if different than default 'log.txt'.\n\n    :: Outputs - matplotlib plots of results in fields, color coded for each log file.\n               - solid lines are training results, dashed lines are test results.\n\n    '''\n    func_name = \"plot_utils.py::plot_logs\"\n\n    # verify logs is a list of Paths (list[Paths]) or single Pathlib object Path,\n    # convert single Path to list to avoid 'not iterable' error\n\n    if not isinstance(logs, list):\n        if isinstance(logs, PurePath):\n            logs = [logs]\n            print(f\"{func_name} info: logs param expects a list argument, converted to list[Path].\")\n        else:\n            raise ValueError(f\"{func_name} - invalid argument for logs parameter.\\n \\\n            Expect list[Path] or single Path obj, received {type(logs)}\")\n\n    # verify valid dir(s) and that every item in list is Path object\n    for i, dir in enumerate(logs):\n        if not isinstance(dir, PurePath):\n            raise ValueError(f\"{func_name} - non-Path object in logs argument of {type(dir)}: \\n{dir}\")\n        if dir.exists():\n            continue\n        raise ValueError(f\"{func_name} - invalid directory in logs argument:\\n{dir}\")\n\n    # load log file(s) and plot\n    dfs = [pd.read_json(Path(p) / log_name, lines=True) for p in logs]\n\n    fig, axs = plt.subplots(ncols=len(fields), figsize=(16, 5))\n\n    for df, color in zip(dfs, sns.color_palette(n_colors=len(logs))):\n        for j, field in enumerate(fields):\n            if field == 'mAP':\n                coco_eval = pd.DataFrame(pd.np.stack(df.test_coco_eval.dropna().values)[:, 1]).ewm(com=ewm_col).mean()\n                axs[j].plot(coco_eval, c=color)\n            else:\n                df.interpolate().ewm(com=ewm_col).mean().plot(\n                    y=[f'train_{field}', f'test_{field}'],\n                    ax=axs[j],\n                    color=[color] * 2,\n                    style=['-', '--']\n                )\n    for ax, field in zip(axs, fields):\n        ax.legend([Path(p).name for p in logs])\n        ax.set_title(field)\n\n\ndef plot_precision_recall(files, naming_scheme='iter'):\n    if naming_scheme == 'exp_id':\n        # name becomes exp_id\n        names = 
[f.parts[-3] for f in files]\n    elif naming_scheme == 'iter':\n        names = [f.stem for f in files]\n    else:\n        raise ValueError(f'not supported {naming_scheme}')\n    fig, axs = plt.subplots(ncols=2, figsize=(16, 5))\n    for f, color, name in zip(files, sns.color_palette(\"Blues\", n_colors=len(files)), names):\n        data = torch.load(f)\n        # precision is n_iou, n_points, n_cat, n_area, max_det\n        precision = data['precision']\n        recall = data['params'].recThrs\n        scores = data['scores']\n        # take precision for all classes, all areas and 100 detections\n        precision = precision[0, :, :, 0, -1].mean(1)\n        scores = scores[0, :, :, 0, -1].mean(1)\n        prec = precision.mean()\n        rec = data['recall'][0, :, 0, -1].mean()\n        print(f'{naming_scheme} {name}: mAP@50={prec * 100: 05.1f}, ' +\n              f'score={scores.mean():0.3f}, ' +\n              f'f1={2 * prec * rec / (prec + rec + 1e-8):0.3f}'\n              )\n        axs[0].plot(recall, precision, c=color)\n        axs[1].plot(recall, scores, c=color)\n\n    axs[0].set_title('Precision / Recall')\n    axs[0].legend(names)\n    axs[1].set_title('Scores / Recall')\n    axs[1].legend(names)\n    return fig, axs\n\n\n\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/__init__.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .build_sam import (\n    build_sam,\n    build_sam_vit_h,\n    build_sam_vit_l,\n    build_sam_vit_b,\n    sam_model_registry,\n)\nfrom .predictor import SamPredictor\nfrom .automatic_mask_generator import SamAutomaticMaskGenerator\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/automatic_mask_generator.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nfrom torchvision.ops.boxes import batched_nms, box_area  # type: ignore\n\nfrom typing import Any, Dict, List, Optional, Tuple\n\nfrom .modeling import Sam\nfrom .predictor import SamPredictor\nfrom .utils.amg import (\n    MaskData,\n    area_from_rle,\n    batch_iterator,\n    batched_mask_to_box,\n    box_xyxy_to_xywh,\n    build_all_layer_point_grids,\n    calculate_stability_score,\n    coco_encode_rle,\n    generate_crop_boxes,\n    is_box_near_crop_edge,\n    mask_to_rle_pytorch,\n    remove_small_regions,\n    rle_to_mask,\n    uncrop_boxes_xyxy,\n    uncrop_masks,\n    uncrop_points,\n)\n\n\nclass SamAutomaticMaskGenerator:\n    def __init__(\n        self,\n        model: Sam,\n        points_per_side: Optional[int] = 32,\n        points_per_batch: int = 64,\n        pred_iou_thresh: float = 0.88,\n        stability_score_thresh: float = 0.95,\n        stability_score_offset: float = 1.0,\n        box_nms_thresh: float = 0.7,\n        crop_n_layers: int = 0,\n        crop_nms_thresh: float = 0.7,\n        crop_overlap_ratio: float = 512 / 1500,\n        crop_n_points_downscale_factor: int = 1,\n        point_grids: Optional[List[np.ndarray]] = None,\n        min_mask_region_area: int = 0,\n        output_mode: str = \"binary_mask\",\n    ) -> None:\n        \"\"\"\n        Using a SAM model, generates masks for the entire image.\n        Generates a grid of point prompts over the image, then filters\n        low quality and duplicate masks. The default settings are chosen\n        for SAM with a ViT-H backbone.\n\n        Arguments:\n          model (Sam): The SAM model to use for mask prediction.\n          points_per_side (int or None): The number of points to be sampled\n            along one side of the image. The total number of points is\n            points_per_side**2. If None, 'point_grids' must provide explicit\n            point sampling.\n          points_per_batch (int): Sets the number of points run simultaneously\n            by the model. Higher numbers may be faster but use more GPU memory.\n          pred_iou_thresh (float): A filtering threshold in [0,1], using the\n            model's predicted mask quality.\n          stability_score_thresh (float): A filtering threshold in [0,1], using\n            the stability of the mask under changes to the cutoff used to binarize\n            the model's mask predictions.\n          stability_score_offset (float): The amount to shift the cutoff when\n            calculated the stability score.\n          box_nms_thresh (float): The box IoU cutoff used by non-maximal\n            suppression to filter duplicate masks.\n          crops_n_layers (int): If >0, mask prediction will be run again on\n            crops of the image. Sets the number of layers to run, where each\n            layer has 2**i_layer number of image crops.\n          crops_nms_thresh (float): The box IoU cutoff used by non-maximal\n            suppression to filter duplicate masks between different crops.\n          crop_overlap_ratio (float): Sets the degree to which crops overlap.\n            In the first crop layer, crops will overlap by this fraction of\n            the image length. 
Later layers with more crops scale down this overlap.\n          crop_n_points_downscale_factor (int): The number of points-per-side\n            sampled in layer n is scaled down by crop_n_points_downscale_factor**n.\n          point_grids (list(np.ndarray) or None): A list over explicit grids\n            of points used for sampling, normalized to [0,1]. The nth grid in the\n            list is used in the nth crop layer. Exclusive with points_per_side.\n          min_mask_region_area (int): If >0, postprocessing will be applied\n            to remove disconnected regions and holes in masks with area smaller\n            than min_mask_region_area. Requires opencv.\n          output_mode (str): The form masks are returned in. Can be 'binary_mask',\n            'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.\n            For large resolutions, 'binary_mask' may consume large amounts of\n            memory.\n        \"\"\"\n\n        assert (points_per_side is None) != (\n            point_grids is None\n        ), \"Exactly one of points_per_side or point_grid must be provided.\"\n        if points_per_side is not None:\n            self.point_grids = build_all_layer_point_grids(\n                points_per_side,\n                crop_n_layers,\n                crop_n_points_downscale_factor,\n            )\n        elif point_grids is not None:\n            self.point_grids = point_grids\n        else:\n            raise ValueError(\"Can't have both points_per_side and point_grid be None.\")\n\n        assert output_mode in [\n            \"binary_mask\",\n            \"uncompressed_rle\",\n            \"coco_rle\",\n        ], f\"Unknown output_mode {output_mode}.\"\n        if output_mode == \"coco_rle\":\n            from pycocotools import mask as mask_utils  # type: ignore # noqa: F401\n\n        if min_mask_region_area > 0:\n            import cv2  # type: ignore # noqa: F401\n\n        self.predictor = SamPredictor(model)\n        self.points_per_batch = points_per_batch\n        self.pred_iou_thresh = pred_iou_thresh\n        self.stability_score_thresh = stability_score_thresh\n        self.stability_score_offset = stability_score_offset\n        self.box_nms_thresh = box_nms_thresh\n        self.crop_n_layers = crop_n_layers\n        self.crop_nms_thresh = crop_nms_thresh\n        self.crop_overlap_ratio = crop_overlap_ratio\n        self.crop_n_points_downscale_factor = crop_n_points_downscale_factor\n        self.min_mask_region_area = min_mask_region_area\n        self.output_mode = output_mode\n\n    @torch.no_grad()\n    def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:\n        \"\"\"\n        Generates masks for the given image.\n\n        Arguments:\n          image (np.ndarray): The image to generate masks for, in HWC uint8 format.\n\n        Returns:\n           list(dict(str, any)): A list over records for masks. Each record is\n             a dict containing the following keys:\n               segmentation (dict(str, any) or np.ndarray): The mask. If\n                 output_mode='binary_mask', is an array of shape HW. Otherwise,\n                 is a dictionary containing the RLE.\n               bbox (list(float)): The box around the mask, in XYWH format.\n               area (int): The area in pixels of the mask.\n               predicted_iou (float): The model's own prediction of the mask's\n                 quality. 
This is filtered by the pred_iou_thresh parameter.\n               point_coords (list(list(float))): The point coordinates input\n                 to the model to generate this mask.\n               stability_score (float): A measure of the mask's quality. This\n                 is filtered on using the stability_score_thresh parameter.\n               crop_box (list(float)): The crop of the image used to generate\n                 the mask, given in XYWH format.\n        \"\"\"\n\n        # Generate masks\n        mask_data = self._generate_masks(image)\n\n        # Filter small disconnected regions and holes in masks\n        if self.min_mask_region_area > 0:\n            mask_data = self.postprocess_small_regions(\n                mask_data,\n                self.min_mask_region_area,\n                max(self.box_nms_thresh, self.crop_nms_thresh),\n            )\n\n        # Encode masks\n        if self.output_mode == \"coco_rle\":\n            mask_data[\"segmentations\"] = [coco_encode_rle(rle) for rle in mask_data[\"rles\"]]\n        elif self.output_mode == \"binary_mask\":\n            mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n        else:\n            mask_data[\"segmentations\"] = mask_data[\"rles\"]\n\n        # Write mask records\n        curr_anns = []\n        for idx in range(len(mask_data[\"segmentations\"])):\n            ann = {\n                \"segmentation\": mask_data[\"segmentations\"][idx],\n                \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n                \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n                \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n                \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n                \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n                \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n            }\n            curr_anns.append(ann)\n\n        return curr_anns\n\n    def _generate_masks(self, image: np.ndarray) -> MaskData:\n        orig_size = image.shape[:2]\n        crop_boxes, layer_idxs = generate_crop_boxes(\n            orig_size, self.crop_n_layers, self.crop_overlap_ratio\n        )\n\n        # Iterate over image crops\n        data = MaskData()\n        for crop_box, layer_idx in zip(crop_boxes, layer_idxs):\n            crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)\n            data.cat(crop_data)\n\n        # Remove duplicate masks between crops\n        if len(crop_boxes) > 1:\n            # Prefer masks from smaller crops\n            scores = 1 / box_area(data[\"crop_boxes\"])\n            scores = scores.to(data[\"boxes\"].device)\n            keep_by_nms = batched_nms(\n                data[\"boxes\"].float(),\n                scores,\n                torch.zeros(len(data[\"boxes\"])),  # categories\n                iou_threshold=self.crop_nms_thresh,\n            )\n            data.filter(keep_by_nms)\n\n        data.to_numpy()\n        return data\n\n    def _process_crop(\n        self,\n        image: np.ndarray,\n        crop_box: List[int],\n        crop_layer_idx: int,\n        orig_size: Tuple[int, ...],\n    ) -> MaskData:\n        # Crop the image and calculate embeddings\n        x0, y0, x1, y1 = crop_box\n        cropped_im = image[y0:y1, x0:x1, :]\n        cropped_im_size = cropped_im.shape[:2]\n        self.predictor.set_image(cropped_im)\n\n        # Get points for this crop\n        points_scale = 
np.array(cropped_im_size)[None, ::-1]\n        points_for_image = self.point_grids[crop_layer_idx] * points_scale\n\n        # Generate masks for this crop in batches\n        data = MaskData()\n        for (points,) in batch_iterator(self.points_per_batch, points_for_image):\n            batch_data = self._process_batch(points, cropped_im_size, crop_box, orig_size)\n            data.cat(batch_data)\n            del batch_data\n        self.predictor.reset_image()\n\n        # Remove duplicates within this crop.\n        keep_by_nms = batched_nms(\n            data[\"boxes\"].float(),\n            data[\"iou_preds\"],\n            torch.zeros(len(data[\"boxes\"])),  # categories\n            iou_threshold=self.box_nms_thresh,\n        )\n        data.filter(keep_by_nms)\n\n        # Return to the original image frame\n        data[\"boxes\"] = uncrop_boxes_xyxy(data[\"boxes\"], crop_box)\n        data[\"points\"] = uncrop_points(data[\"points\"], crop_box)\n        data[\"crop_boxes\"] = torch.tensor([crop_box for _ in range(len(data[\"rles\"]))])\n\n        return data\n\n    def _process_batch(\n        self,\n        points: np.ndarray,\n        im_size: Tuple[int, ...],\n        crop_box: List[int],\n        orig_size: Tuple[int, ...],\n    ) -> MaskData:\n        orig_h, orig_w = orig_size\n\n        # Run model on this batch\n        transformed_points = self.predictor.transform.apply_coords(points, im_size)\n        in_points = torch.as_tensor(transformed_points, device=self.predictor.device)\n        in_labels = torch.ones(in_points.shape[0], dtype=torch.int, device=in_points.device)\n        masks, iou_preds, _ = self.predictor.predict_torch(\n            in_points[:, None, :],\n            in_labels[:, None],\n            multimask_output=True,\n            return_logits=True,\n        )\n\n        # Serialize predictions and store in MaskData\n        data = MaskData(\n            masks=masks.flatten(0, 1),\n            iou_preds=iou_preds.flatten(0, 1),\n            points=torch.as_tensor(points.repeat(masks.shape[1], axis=0)),\n        )\n        del masks\n\n        # Filter by predicted IoU\n        if self.pred_iou_thresh > 0.0:\n            keep_mask = data[\"iou_preds\"] > self.pred_iou_thresh\n            data.filter(keep_mask)\n\n        # Calculate stability score\n        data[\"stability_score\"] = calculate_stability_score(\n            data[\"masks\"], self.predictor.model.mask_threshold, self.stability_score_offset\n        )\n        if self.stability_score_thresh > 0.0:\n            keep_mask = data[\"stability_score\"] >= self.stability_score_thresh\n            data.filter(keep_mask)\n\n        # Threshold masks and calculate boxes\n        data[\"masks\"] = data[\"masks\"] > self.predictor.model.mask_threshold\n        data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n        # Filter boxes that touch crop boundaries\n        keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], crop_box, [0, 0, orig_w, orig_h])\n        if not torch.all(keep_mask):\n            data.filter(keep_mask)\n\n        # Compress to RLE\n        data[\"masks\"] = uncrop_masks(data[\"masks\"], crop_box, orig_h, orig_w)\n        data[\"rles\"] = mask_to_rle_pytorch(data[\"masks\"])\n        del data[\"masks\"]\n\n        return data\n\n    @staticmethod\n    def postprocess_small_regions(\n        mask_data: MaskData, min_area: int, nms_thresh: float\n    ) -> MaskData:\n        \"\"\"\n        Removes small disconnected regions and holes in masks, then reruns\n        box NMS 
to remove any new duplicates.\n\n        Edits mask_data in place.\n\n        Requires open-cv as a dependency.\n        \"\"\"\n        if len(mask_data[\"rles\"]) == 0:\n            return mask_data\n\n        # Filter small disconnected regions and holes\n        new_masks = []\n        scores = []\n        for rle in mask_data[\"rles\"]:\n            mask = rle_to_mask(rle)\n\n            mask, changed = remove_small_regions(mask, min_area, mode=\"holes\")\n            unchanged = not changed\n            mask, changed = remove_small_regions(mask, min_area, mode=\"islands\")\n            unchanged = unchanged and not changed\n\n            new_masks.append(torch.as_tensor(mask).unsqueeze(0))\n            # Give score=0 to changed masks and score=1 to unchanged masks\n            # so NMS will prefer ones that didn't need postprocessing\n            scores.append(float(unchanged))\n\n        # Recalculate boxes and remove any new duplicates\n        masks = torch.cat(new_masks, dim=0)\n        boxes = batched_mask_to_box(masks)\n        keep_by_nms = batched_nms(\n            boxes.float(),\n            torch.as_tensor(scores),\n            torch.zeros(len(boxes)),  # categories\n            iou_threshold=nms_thresh,\n        )\n\n        # Only recalculate RLEs for masks that have changed\n        for i_mask in keep_by_nms:\n            if scores[i_mask] == 0.0:\n                mask_torch = masks[i_mask].unsqueeze(0)\n                mask_data[\"rles\"][i_mask] = mask_to_rle_pytorch(mask_torch)[0]\n                mask_data[\"boxes\"][i_mask] = boxes[i_mask]  # update res directly\n        mask_data.filter(keep_by_nms)\n\n        return mask_data\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/build_sam.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\n\nfrom functools import partial\n\nfrom .modeling import ImageEncoderViT, MaskDecoder, PromptEncoder, Sam, TwoWayTransformer\n\n\ndef build_sam_vit_h(checkpoint=None):\n    return _build_sam(\n        encoder_embed_dim=1280,\n        encoder_depth=32,\n        encoder_num_heads=16,\n        encoder_global_attn_indexes=[7, 15, 23, 31],\n        checkpoint=checkpoint,\n    )\n\n\nbuild_sam = build_sam_vit_h\n\n\ndef build_sam_vit_l(checkpoint=None):\n    return _build_sam(\n        encoder_embed_dim=1024,\n        encoder_depth=24,\n        encoder_num_heads=16,\n        encoder_global_attn_indexes=[5, 11, 17, 23],\n        checkpoint=checkpoint,\n    )\n\n\ndef build_sam_vit_b(checkpoint=None):\n    return _build_sam(\n        encoder_embed_dim=768,\n        encoder_depth=12,\n        encoder_num_heads=12,\n        encoder_global_attn_indexes=[2, 5, 8, 11],\n        checkpoint=checkpoint,\n    )\n\n\nsam_model_registry = {\n    \"default\": build_sam,\n    \"vit_h\": build_sam,\n    \"vit_l\": build_sam_vit_l,\n    \"vit_b\": build_sam_vit_b,\n}\n\n\ndef _build_sam(\n    encoder_embed_dim,\n    encoder_depth,\n    encoder_num_heads,\n    encoder_global_attn_indexes,\n    checkpoint=None,\n):\n    prompt_embed_dim = 256\n    image_size = 1024\n    vit_patch_size = 16\n    image_embedding_size = image_size // vit_patch_size\n    sam = Sam(\n        image_encoder=ImageEncoderViT(\n            depth=encoder_depth,\n            embed_dim=encoder_embed_dim,\n            img_size=image_size,\n            mlp_ratio=4,\n            norm_layer=partial(torch.nn.LayerNorm, eps=1e-6),\n            num_heads=encoder_num_heads,\n            patch_size=vit_patch_size,\n            qkv_bias=True,\n            use_rel_pos=True,\n            global_attn_indexes=encoder_global_attn_indexes,\n            window_size=14,\n            out_chans=prompt_embed_dim,\n        ),\n        prompt_encoder=PromptEncoder(\n            embed_dim=prompt_embed_dim,\n            image_embedding_size=(image_embedding_size, image_embedding_size),\n            input_image_size=(image_size, image_size),\n            mask_in_chans=16,\n        ),\n        mask_decoder=MaskDecoder(\n            num_multimask_outputs=3,\n            transformer=TwoWayTransformer(\n                depth=2,\n                embedding_dim=prompt_embed_dim,\n                mlp_dim=2048,\n                num_heads=8,\n            ),\n            transformer_dim=prompt_embed_dim,\n            iou_head_depth=3,\n            iou_head_hidden_dim=256,\n        ),\n        pixel_mean=[123.675, 116.28, 103.53],\n        pixel_std=[58.395, 57.12, 57.375],\n    )\n    sam.eval()\n    if checkpoint is not None:\n        with open(checkpoint, \"rb\") as f:\n            state_dict = torch.load(f)\n        sam.load_state_dict(state_dict)\n    return sam\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/__init__.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom .sam import Sam\nfrom .image_encoder import ImageEncoderViT\nfrom .mask_decoder import MaskDecoder\nfrom .prompt_encoder import PromptEncoder\nfrom .transformer import TwoWayTransformer\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/common.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\n\nfrom typing import Type\n\n\nclass MLPBlock(nn.Module):\n    def __init__(\n        self,\n        embedding_dim: int,\n        mlp_dim: int,\n        act: Type[nn.Module] = nn.GELU,\n    ) -> None:\n        super().__init__()\n        self.lin1 = nn.Linear(embedding_dim, mlp_dim)\n        self.lin2 = nn.Linear(mlp_dim, embedding_dim)\n        self.act = act()\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        return self.lin2(self.act(self.lin1(x)))\n\n\n# From https://github.com/facebookresearch/detectron2/blob/main/detectron2/layers/batch_norm.py # noqa\n# Itself from https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119  # noqa\nclass LayerNorm2d(nn.Module):\n    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:\n        super().__init__()\n        self.weight = nn.Parameter(torch.ones(num_channels))\n        self.bias = nn.Parameter(torch.zeros(num_channels))\n        self.eps = eps\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        u = x.mean(1, keepdim=True)\n        s = (x - u).pow(2).mean(1, keepdim=True)\n        x = (x - u) / torch.sqrt(s + self.eps)\n        x = self.weight[:, None, None] * x + self.bias[:, None, None]\n        return x\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/image_encoder.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom typing import Optional, Tuple, Type\n\nfrom .common import LayerNorm2d, MLPBlock\n\n\n# This class and its supporting functions below lightly adapted from the ViTDet backbone available at: https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/backbone/vit.py # noqa\nclass ImageEncoderViT(nn.Module):\n    def __init__(\n        self,\n        img_size: int = 1024,\n        patch_size: int = 16,\n        in_chans: int = 3,\n        embed_dim: int = 768,\n        depth: int = 12,\n        num_heads: int = 12,\n        mlp_ratio: float = 4.0,\n        out_chans: int = 256,\n        qkv_bias: bool = True,\n        norm_layer: Type[nn.Module] = nn.LayerNorm,\n        act_layer: Type[nn.Module] = nn.GELU,\n        use_abs_pos: bool = True,\n        use_rel_pos: bool = False,\n        rel_pos_zero_init: bool = True,\n        window_size: int = 0,\n        global_attn_indexes: Tuple[int, ...] = (),\n    ) -> None:\n        \"\"\"\n        Args:\n            img_size (int): Input image size.\n            patch_size (int): Patch size.\n            in_chans (int): Number of input image channels.\n            embed_dim (int): Patch embedding dimension.\n            depth (int): Depth of ViT.\n            num_heads (int): Number of attention heads in each ViT block.\n            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n            qkv_bias (bool): If True, add a learnable bias to query, key, value.\n            norm_layer (nn.Module): Normalization layer.\n            act_layer (nn.Module): Activation layer.\n            use_abs_pos (bool): If True, use absolute positional embeddings.\n            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n            window_size (int): Window size for window attention blocks.\n            global_attn_indexes (list): Indexes for blocks using global attention.\n        \"\"\"\n        super().__init__()\n        self.img_size = img_size\n\n        self.patch_embed = PatchEmbed(\n            kernel_size=(patch_size, patch_size),\n            stride=(patch_size, patch_size),\n            in_chans=in_chans,\n            embed_dim=embed_dim,\n        )\n\n        self.pos_embed: Optional[nn.Parameter] = None\n        if use_abs_pos:\n            # Initialize absolute positional embedding with pretrain image size.\n            self.pos_embed = nn.Parameter(\n                torch.zeros(1, img_size // patch_size, img_size // patch_size, embed_dim)\n            )\n\n        self.blocks = nn.ModuleList()\n        for i in range(depth):\n            block = Block(\n                dim=embed_dim,\n                num_heads=num_heads,\n                mlp_ratio=mlp_ratio,\n                qkv_bias=qkv_bias,\n                norm_layer=norm_layer,\n                act_layer=act_layer,\n                use_rel_pos=use_rel_pos,\n                rel_pos_zero_init=rel_pos_zero_init,\n                window_size=window_size if i not in global_attn_indexes else 0,\n                input_size=(img_size // patch_size, img_size // patch_size),\n            )\n            self.blocks.append(block)\n\n        self.neck = 
nn.Sequential(\n            nn.Conv2d(\n                embed_dim,\n                out_chans,\n                kernel_size=1,\n                bias=False,\n            ),\n            LayerNorm2d(out_chans),\n            nn.Conv2d(\n                out_chans,\n                out_chans,\n                kernel_size=3,\n                padding=1,\n                bias=False,\n            ),\n            LayerNorm2d(out_chans),\n        )\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = self.patch_embed(x)\n        if self.pos_embed is not None:\n            x = x + self.pos_embed\n\n        for blk in self.blocks:\n            x = blk(x)\n\n        x = self.neck(x.permute(0, 3, 1, 2))\n\n        return x\n\n\nclass Block(nn.Module):\n    \"\"\"Transformer blocks with support of window attention and residual propagation blocks\"\"\"\n\n    def __init__(\n        self,\n        dim: int,\n        num_heads: int,\n        mlp_ratio: float = 4.0,\n        qkv_bias: bool = True,\n        norm_layer: Type[nn.Module] = nn.LayerNorm,\n        act_layer: Type[nn.Module] = nn.GELU,\n        use_rel_pos: bool = False,\n        rel_pos_zero_init: bool = True,\n        window_size: int = 0,\n        input_size: Optional[Tuple[int, int]] = None,\n    ) -> None:\n        \"\"\"\n        Args:\n            dim (int): Number of input channels.\n            num_heads (int): Number of attention heads in each ViT block.\n            mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.\n            qkv_bias (bool): If True, add a learnable bias to query, key, value.\n            norm_layer (nn.Module): Normalization layer.\n            act_layer (nn.Module): Activation layer.\n            use_rel_pos (bool): If True, add relative positional embeddings to the attention map.\n            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n            window_size (int): Window size for window attention blocks. 
If it equals 0, then\n                use global attention.\n            input_size (int or None): Input resolution for calculating the relative positional\n                parameter size.\n        \"\"\"\n        super().__init__()\n        self.norm1 = norm_layer(dim)\n        self.attn = Attention(\n            dim,\n            num_heads=num_heads,\n            qkv_bias=qkv_bias,\n            use_rel_pos=use_rel_pos,\n            rel_pos_zero_init=rel_pos_zero_init,\n            input_size=input_size if window_size == 0 else (window_size, window_size),\n        )\n\n        self.norm2 = norm_layer(dim)\n        self.mlp = MLPBlock(embedding_dim=dim, mlp_dim=int(dim * mlp_ratio), act=act_layer)\n\n        self.window_size = window_size\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        shortcut = x\n        x = self.norm1(x)\n        # Window partition\n        if self.window_size > 0:\n            H, W = x.shape[1], x.shape[2]\n            x, pad_hw = window_partition(x, self.window_size)\n\n        x = self.attn(x)\n        # Reverse window partition\n        if self.window_size > 0:\n            x = window_unpartition(x, self.window_size, pad_hw, (H, W))\n\n        x = shortcut + x\n        x = x + self.mlp(self.norm2(x))\n\n        return x\n\n\nclass Attention(nn.Module):\n    \"\"\"Multi-head Attention block with relative position embeddings.\"\"\"\n\n    def __init__(\n        self,\n        dim: int,\n        num_heads: int = 8,\n        qkv_bias: bool = True,\n        use_rel_pos: bool = False,\n        rel_pos_zero_init: bool = True,\n        input_size: Optional[Tuple[int, int]] = None,\n    ) -> None:\n        \"\"\"\n        Args:\n            dim (int): Number of input channels.\n            num_heads (int): Number of attention heads.\n            qkv_bias (bool:  If True, add a learnable bias to query, key, value.\n            rel_pos (bool): If True, add relative positional embeddings to the attention map.\n            rel_pos_zero_init (bool): If True, zero initialize relative positional parameters.\n            input_size (int or None): Input resolution for calculating the relative positional\n                parameter size.\n        \"\"\"\n        super().__init__()\n        self.num_heads = num_heads\n        head_dim = dim // num_heads\n        self.scale = head_dim**-0.5\n\n        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)\n        self.proj = nn.Linear(dim, dim)\n\n        self.use_rel_pos = use_rel_pos\n        if self.use_rel_pos:\n            assert (\n                input_size is not None\n            ), \"Input size must be provided if using relative positional encoding.\"\n            # initialize relative positional embeddings\n            self.rel_pos_h = nn.Parameter(torch.zeros(2 * input_size[0] - 1, head_dim))\n            self.rel_pos_w = nn.Parameter(torch.zeros(2 * input_size[1] - 1, head_dim))\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        B, H, W, _ = x.shape\n        # qkv with shape (3, B, nHead, H * W, C)\n        qkv = self.qkv(x).reshape(B, H * W, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)\n        # q, k, v with shape (B * nHead, H * W, C)\n        q, k, v = qkv.reshape(3, B * self.num_heads, H * W, -1).unbind(0)\n\n        attn = (q * self.scale) @ k.transpose(-2, -1)\n\n        if self.use_rel_pos:\n            attn = add_decomposed_rel_pos(attn, q, self.rel_pos_h, self.rel_pos_w, (H, W), (H, W))\n\n        attn = attn.softmax(dim=-1)\n        x = (attn @ v).view(B, self.num_heads, H, W, 
-1).permute(0, 2, 3, 1, 4).reshape(B, H, W, -1)\n        x = self.proj(x)\n\n        return x\n\n\ndef window_partition(x: torch.Tensor, window_size: int) -> Tuple[torch.Tensor, Tuple[int, int]]:\n    \"\"\"\n    Partition into non-overlapping windows with padding if needed.\n    Args:\n        x (tensor): input tokens with [B, H, W, C].\n        window_size (int): window size.\n\n    Returns:\n        windows: windows after partition with [B * num_windows, window_size, window_size, C].\n        (Hp, Wp): padded height and width before partition\n    \"\"\"\n    B, H, W, C = x.shape\n\n    pad_h = (window_size - H % window_size) % window_size\n    pad_w = (window_size - W % window_size) % window_size\n    if pad_h > 0 or pad_w > 0:\n        x = F.pad(x, (0, 0, 0, pad_w, 0, pad_h))\n    Hp, Wp = H + pad_h, W + pad_w\n\n    x = x.view(B, Hp // window_size, window_size, Wp // window_size, window_size, C)\n    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)\n    return windows, (Hp, Wp)\n\n\ndef window_unpartition(\n    windows: torch.Tensor, window_size: int, pad_hw: Tuple[int, int], hw: Tuple[int, int]\n) -> torch.Tensor:\n    \"\"\"\n    Window unpartition into original sequences and removing padding.\n    Args:\n        x (tensor): input tokens with [B * num_windows, window_size, window_size, C].\n        window_size (int): window size.\n        pad_hw (Tuple): padded height and width (Hp, Wp).\n        hw (Tuple): original height and width (H, W) before padding.\n\n    Returns:\n        x: unpartitioned sequences with [B, H, W, C].\n    \"\"\"\n    Hp, Wp = pad_hw\n    H, W = hw\n    B = windows.shape[0] // (Hp * Wp // window_size // window_size)\n    x = windows.view(B, Hp // window_size, Wp // window_size, window_size, window_size, -1)\n    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, Hp, Wp, -1)\n\n    if Hp > H or Wp > W:\n        x = x[:, :H, :W, :].contiguous()\n    return x\n\n\ndef get_rel_pos(q_size: int, k_size: int, rel_pos: torch.Tensor) -> torch.Tensor:\n    \"\"\"\n    Get relative positional embeddings according to the relative positions of\n        query and key sizes.\n    Args:\n        q_size (int): size of query q.\n        k_size (int): size of key k.\n        rel_pos (Tensor): relative position embeddings (L, C).\n\n    Returns:\n        Extracted positional embeddings according to relative positions.\n    \"\"\"\n    max_rel_dist = int(2 * max(q_size, k_size) - 1)\n    # Interpolate rel pos if needed.\n    if rel_pos.shape[0] != max_rel_dist:\n        # Interpolate rel pos.\n        rel_pos_resized = F.interpolate(\n            rel_pos.reshape(1, rel_pos.shape[0], -1).permute(0, 2, 1),\n            size=max_rel_dist,\n            mode=\"linear\",\n        )\n        rel_pos_resized = rel_pos_resized.reshape(-1, max_rel_dist).permute(1, 0)\n    else:\n        rel_pos_resized = rel_pos\n\n    # Scale the coords with short length if shapes for q and k are different.\n    q_coords = torch.arange(q_size)[:, None] * max(k_size / q_size, 1.0)\n    k_coords = torch.arange(k_size)[None, :] * max(q_size / k_size, 1.0)\n    relative_coords = (q_coords - k_coords) + (k_size - 1) * max(q_size / k_size, 1.0)\n\n    return rel_pos_resized[relative_coords.long()]\n\n\ndef add_decomposed_rel_pos(\n    attn: torch.Tensor,\n    q: torch.Tensor,\n    rel_pos_h: torch.Tensor,\n    rel_pos_w: torch.Tensor,\n    q_size: Tuple[int, int],\n    k_size: Tuple[int, int],\n) -> torch.Tensor:\n    \"\"\"\n    Calculate decomposed Relative 
Positional Embeddings from :paper:`mvitv2`.\n    https://github.com/facebookresearch/mvit/blob/19786631e330df9f3622e5402b4a419a263a2c80/mvit/models/attention.py   # noqa B950\n    Args:\n        attn (Tensor): attention map.\n        q (Tensor): query q in the attention layer with shape (B, q_h * q_w, C).\n        rel_pos_h (Tensor): relative position embeddings (Lh, C) for height axis.\n        rel_pos_w (Tensor): relative position embeddings (Lw, C) for width axis.\n        q_size (Tuple): spatial sequence size of query q with (q_h, q_w).\n        k_size (Tuple): spatial sequence size of key k with (k_h, k_w).\n\n    Returns:\n        attn (Tensor): attention map with added relative positional embeddings.\n    \"\"\"\n    q_h, q_w = q_size\n    k_h, k_w = k_size\n    Rh = get_rel_pos(q_h, k_h, rel_pos_h)\n    Rw = get_rel_pos(q_w, k_w, rel_pos_w)\n\n    B, _, dim = q.shape\n    r_q = q.reshape(B, q_h, q_w, dim)\n    rel_h = torch.einsum(\"bhwc,hkc->bhwk\", r_q, Rh)\n    rel_w = torch.einsum(\"bhwc,wkc->bhwk\", r_q, Rw)\n\n    attn = (\n        attn.view(B, q_h, q_w, k_h, k_w) + rel_h[:, :, :, :, None] + rel_w[:, :, :, None, :]\n    ).view(B, q_h * q_w, k_h * k_w)\n\n    return attn\n\n\nclass PatchEmbed(nn.Module):\n    \"\"\"\n    Image to Patch Embedding.\n    \"\"\"\n\n    def __init__(\n        self,\n        kernel_size: Tuple[int, int] = (16, 16),\n        stride: Tuple[int, int] = (16, 16),\n        padding: Tuple[int, int] = (0, 0),\n        in_chans: int = 3,\n        embed_dim: int = 768,\n    ) -> None:\n        \"\"\"\n        Args:\n            kernel_size (Tuple): kernel size of the projection layer.\n            stride (Tuple): stride of the projection layer.\n            padding (Tuple): padding size of the projection layer.\n            in_chans (int): Number of input image channels.\n            embed_dim (int): Patch embedding dimension.\n        \"\"\"\n        super().__init__()\n\n        self.proj = nn.Conv2d(\n            in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding\n        )\n\n    def forward(self, x: torch.Tensor) -> torch.Tensor:\n        x = self.proj(x)\n        # B C H W -> B H W C\n        x = x.permute(0, 2, 3, 1)\n        return x\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/mask_decoder.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom typing import List, Tuple, Type\n\nfrom .common import LayerNorm2d\n\n\nclass MaskDecoder(nn.Module):\n    def __init__(\n        self,\n        *,\n        transformer_dim: int,\n        transformer: nn.Module,\n        num_multimask_outputs: int = 3,\n        activation: Type[nn.Module] = nn.GELU,\n        iou_head_depth: int = 3,\n        iou_head_hidden_dim: int = 256,\n    ) -> None:\n        \"\"\"\n        Predicts masks given an image and prompt embeddings, using a\n        tranformer architecture.\n\n        Arguments:\n          transformer_dim (int): the channel dimension of the transformer\n          transformer (nn.Module): the transformer used to predict masks\n          num_multimask_outputs (int): the number of masks to predict\n            when disambiguating masks\n          activation (nn.Module): the type of activation to use when\n            upscaling masks\n          iou_head_depth (int): the depth of the MLP used to predict\n            mask quality\n          iou_head_hidden_dim (int): the hidden dimension of the MLP\n            used to predict mask quality\n        \"\"\"\n        super().__init__()\n        self.transformer_dim = transformer_dim\n        self.transformer = transformer\n\n        self.num_multimask_outputs = num_multimask_outputs\n\n        self.iou_token = nn.Embedding(1, transformer_dim)\n        self.num_mask_tokens = num_multimask_outputs + 1\n        self.mask_tokens = nn.Embedding(self.num_mask_tokens, transformer_dim)\n\n        self.output_upscaling = nn.Sequential(\n            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),\n            LayerNorm2d(transformer_dim // 4),\n            activation(),\n            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),\n            activation(),\n        )\n        self.output_hypernetworks_mlps = nn.ModuleList(\n            [\n                MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)\n                for i in range(self.num_mask_tokens)\n            ]\n        )\n\n        self.iou_prediction_head = MLP(\n            transformer_dim, iou_head_hidden_dim, self.num_mask_tokens, iou_head_depth\n        )\n\n    def forward(\n        self,\n        image_embeddings: torch.Tensor,\n        image_pe: torch.Tensor,\n        sparse_prompt_embeddings: torch.Tensor,\n        dense_prompt_embeddings: torch.Tensor,\n        multimask_output: bool,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Predict masks given image and prompt embeddings.\n\n        Arguments:\n          image_embeddings (torch.Tensor): the embeddings from the image encoder\n          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings\n          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes\n          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs\n          multimask_output (bool): Whether to return multiple masks or a single\n            mask.\n\n        Returns:\n          torch.Tensor: batched predicted masks\n          torch.Tensor: batched predictions of mask quality\n        \"\"\"\n        masks, iou_pred = self.predict_masks(\n  
          image_embeddings=image_embeddings,\n            image_pe=image_pe,\n            sparse_prompt_embeddings=sparse_prompt_embeddings,\n            dense_prompt_embeddings=dense_prompt_embeddings,\n        )\n\n        # Select the correct mask or masks for output\n        if multimask_output:\n            mask_slice = slice(1, None)\n        else:\n            mask_slice = slice(0, 1)\n        masks = masks[:, mask_slice, :, :]\n        iou_pred = iou_pred[:, mask_slice]\n\n        # Prepare output\n        return masks, iou_pred\n\n    def predict_masks(\n        self,\n        image_embeddings: torch.Tensor,\n        image_pe: torch.Tensor,\n        sparse_prompt_embeddings: torch.Tensor,\n        dense_prompt_embeddings: torch.Tensor,\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"Predicts masks. See 'forward' for more details.\"\"\"\n        # Concatenate output tokens\n        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight], dim=0)\n        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)\n        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)\n\n        # Expand per-image data in batch direction to be per-mask\n        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)\n        src = src + dense_prompt_embeddings\n        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)\n        b, c, h, w = src.shape\n\n        # Run the transformer\n        hs, src = self.transformer(src, pos_src, tokens)\n        iou_token_out = hs[:, 0, :]\n        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]\n\n        # Upscale mask embeddings and predict masks using the mask tokens\n        src = src.transpose(1, 2).view(b, c, h, w)\n        upscaled_embedding = self.output_upscaling(src)\n        hyper_in_list: List[torch.Tensor] = []\n        for i in range(self.num_mask_tokens):\n            hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))\n        hyper_in = torch.stack(hyper_in_list, dim=1)\n        b, c, h, w = upscaled_embedding.shape\n        masks = (hyper_in @ upscaled_embedding.view(b, c, h * w)).view(b, -1, h, w)\n\n        # Generate mask quality predictions\n        iou_pred = self.iou_prediction_head(iou_token_out)\n\n        return masks, iou_pred\n\n\n# Lightly adapted from\n# https://github.com/facebookresearch/MaskFormer/blob/main/mask_former/modeling/transformer/transformer_predictor.py # noqa\nclass MLP(nn.Module):\n    def __init__(\n        self,\n        input_dim: int,\n        hidden_dim: int,\n        output_dim: int,\n        num_layers: int,\n        sigmoid_output: bool = False,\n    ) -> None:\n        super().__init__()\n        self.num_layers = num_layers\n        h = [hidden_dim] * (num_layers - 1)\n        self.layers = nn.ModuleList(\n            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])\n        )\n        self.sigmoid_output = sigmoid_output\n\n    def forward(self, x):\n        for i, layer in enumerate(self.layers):\n            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)\n        if self.sigmoid_output:\n            x = F.sigmoid(x)\n        return x\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/prompt_encoder.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nfrom torch import nn\n\nfrom typing import Any, Optional, Tuple, Type\n\nfrom .common import LayerNorm2d\n\n\nclass PromptEncoder(nn.Module):\n    def __init__(\n        self,\n        embed_dim: int,\n        image_embedding_size: Tuple[int, int],\n        input_image_size: Tuple[int, int],\n        mask_in_chans: int,\n        activation: Type[nn.Module] = nn.GELU,\n    ) -> None:\n        \"\"\"\n        Encodes prompts for input to SAM's mask decoder.\n\n        Arguments:\n          embed_dim (int): The prompts' embedding dimension\n          image_embedding_size (tuple(int, int)): The spatial size of the\n            image embedding, as (H, W).\n          input_image_size (int): The padded size of the image as input\n            to the image encoder, as (H, W).\n          mask_in_chans (int): The number of hidden channels used for\n            encoding input masks.\n          activation (nn.Module): The activation to use when encoding\n            input masks.\n        \"\"\"\n        super().__init__()\n        self.embed_dim = embed_dim\n        self.input_image_size = input_image_size\n        self.image_embedding_size = image_embedding_size\n        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)\n\n        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners\n        point_embeddings = [nn.Embedding(1, embed_dim) for i in range(self.num_point_embeddings)]\n        self.point_embeddings = nn.ModuleList(point_embeddings)\n        self.not_a_point_embed = nn.Embedding(1, embed_dim)\n\n        self.mask_input_size = (4 * image_embedding_size[0], 4 * image_embedding_size[1])\n        self.mask_downscaling = nn.Sequential(\n            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),\n            LayerNorm2d(mask_in_chans // 4),\n            activation(),\n            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),\n            LayerNorm2d(mask_in_chans),\n            activation(),\n            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),\n        )\n        self.no_mask_embed = nn.Embedding(1, embed_dim)\n\n    def get_dense_pe(self) -> torch.Tensor:\n        \"\"\"\n        Returns the positional encoding used to encode point prompts,\n        applied to a dense set of points the shape of the image encoding.\n\n        Returns:\n          torch.Tensor: Positional encoding with shape\n            1x(embed_dim)x(embedding_h)x(embedding_w)\n        \"\"\"\n        return self.pe_layer(self.image_embedding_size).unsqueeze(0)\n\n    def _embed_points(\n        self,\n        points: torch.Tensor,\n        labels: torch.Tensor,\n        pad: bool,\n    ) -> torch.Tensor:\n        \"\"\"Embeds point prompts.\"\"\"\n        points = points + 0.5  # Shift to center of pixel\n        if pad:\n            padding_point = torch.zeros((points.shape[0], 1, 2), device=points.device)\n            padding_label = -torch.ones((labels.shape[0], 1), device=labels.device)\n            points = torch.cat([points, padding_point], dim=1)\n            labels = torch.cat([labels, padding_label], dim=1)\n        point_embedding = self.pe_layer.forward_with_coords(points, self.input_image_size)\n        point_embedding[labels == -1] = 0.0\n        point_embedding[labels == -1] += 
self.not_a_point_embed.weight\n        point_embedding[labels == 0] += self.point_embeddings[0].weight\n        point_embedding[labels == 1] += self.point_embeddings[1].weight\n        return point_embedding\n\n    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:\n        \"\"\"Embeds box prompts.\"\"\"\n        boxes = boxes + 0.5  # Shift to center of pixel\n        coords = boxes.reshape(-1, 2, 2)\n        corner_embedding = self.pe_layer.forward_with_coords(coords, self.input_image_size)\n        corner_embedding[:, 0, :] += self.point_embeddings[2].weight\n        corner_embedding[:, 1, :] += self.point_embeddings[3].weight\n        return corner_embedding\n\n    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:\n        \"\"\"Embeds mask inputs.\"\"\"\n        mask_embedding = self.mask_downscaling(masks)\n        return mask_embedding\n\n    def _get_batch_size(\n        self,\n        points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n        boxes: Optional[torch.Tensor],\n        masks: Optional[torch.Tensor],\n    ) -> int:\n        \"\"\"\n        Gets the batch size of the output given the batch size of the input prompts.\n        \"\"\"\n        if points is not None:\n            return points[0].shape[0]\n        elif boxes is not None:\n            return boxes.shape[0]\n        elif masks is not None:\n            return masks.shape[0]\n        else:\n            return 1\n\n    def _get_device(self) -> torch.device:\n        return self.point_embeddings[0].weight.device\n\n    def forward(\n        self,\n        points: Optional[Tuple[torch.Tensor, torch.Tensor]],\n        boxes: Optional[torch.Tensor],\n        masks: Optional[torch.Tensor],\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Embeds different types of prompts, returning both sparse and dense\n        embeddings.\n\n        Arguments:\n          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates\n            and labels to embed.\n          boxes (torch.Tensor or none): boxes to embed\n          masks (torch.Tensor or none): masks to embed\n\n        Returns:\n          torch.Tensor: sparse embeddings for the points and boxes, with shape\n            BxNx(embed_dim), where N is determined by the number of input points\n            and boxes.\n          torch.Tensor: dense embeddings for the masks, in the shape\n            Bx(embed_dim)x(embed_H)x(embed_W)\n        \"\"\"\n        bs = self._get_batch_size(points, boxes, masks)\n        sparse_embeddings = torch.empty((bs, 0, self.embed_dim), device=self._get_device())\n        if points is not None:\n            coords, labels = points\n            point_embeddings = self._embed_points(coords, labels, pad=(boxes is None))\n            sparse_embeddings = torch.cat([sparse_embeddings, point_embeddings], dim=1)\n        if boxes is not None:\n            box_embeddings = self._embed_boxes(boxes)\n            sparse_embeddings = torch.cat([sparse_embeddings, box_embeddings], dim=1)\n\n        if masks is not None:\n            dense_embeddings = self._embed_masks(masks)\n        else:\n            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(\n                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]\n            )\n\n        return sparse_embeddings, dense_embeddings\n\n\nclass PositionEmbeddingRandom(nn.Module):\n    \"\"\"\n    Positional encoding using random spatial frequencies.\n    \"\"\"\n\n    def __init__(self, num_pos_feats: int = 
64, scale: Optional[float] = None) -> None:\n        super().__init__()\n        if scale is None or scale <= 0.0:\n            scale = 1.0\n        self.register_buffer(\n            \"positional_encoding_gaussian_matrix\",\n            scale * torch.randn((2, num_pos_feats)),\n        )\n\n    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:\n        \"\"\"Positionally encode points that are normalized to [0,1].\"\"\"\n        # assuming coords are in [0, 1]^2 square and have d_1 x ... x d_n x 2 shape\n        coords = 2 * coords - 1\n        coords = coords @ self.positional_encoding_gaussian_matrix\n        coords = 2 * np.pi * coords\n        # outputs d_1 x ... x d_n x C shape\n        return torch.cat([torch.sin(coords), torch.cos(coords)], dim=-1)\n\n    def forward(self, size: Tuple[int, int]) -> torch.Tensor:\n        \"\"\"Generate positional encoding for a grid of the specified size.\"\"\"\n        h, w = size\n        device: Any = self.positional_encoding_gaussian_matrix.device\n        grid = torch.ones((h, w), device=device, dtype=torch.float32)\n        y_embed = grid.cumsum(dim=0) - 0.5\n        x_embed = grid.cumsum(dim=1) - 0.5\n        y_embed = y_embed / h\n        x_embed = x_embed / w\n\n        pe = self._pe_encoding(torch.stack([x_embed, y_embed], dim=-1))\n        return pe.permute(2, 0, 1)  # C x H x W\n\n    def forward_with_coords(\n        self, coords_input: torch.Tensor, image_size: Tuple[int, int]\n    ) -> torch.Tensor:\n        \"\"\"Positionally encode points that are not normalized to [0,1].\"\"\"\n        coords = coords_input.clone()\n        coords[:, :, 0] = coords[:, :, 0] / image_size[1]\n        coords[:, :, 1] = coords[:, :, 1] / image_size[0]\n        return self._pe_encoding(coords.to(torch.float))  # B x N x C\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/sam.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom typing import Any, Dict, List, Tuple\n\nfrom .image_encoder import ImageEncoderViT\nfrom .mask_decoder import MaskDecoder\nfrom .prompt_encoder import PromptEncoder\n\n\nclass Sam(nn.Module):\n    mask_threshold: float = 0.0\n    image_format: str = \"RGB\"\n\n    def __init__(\n        self,\n        image_encoder: ImageEncoderViT,\n        prompt_encoder: PromptEncoder,\n        mask_decoder: MaskDecoder,\n        pixel_mean: List[float] = [123.675, 116.28, 103.53],\n        pixel_std: List[float] = [58.395, 57.12, 57.375],\n    ) -> None:\n        \"\"\"\n        SAM predicts object masks from an image and input prompts.\n\n        Arguments:\n          image_encoder (ImageEncoderViT): The backbone used to encode the\n            image into image embeddings that allow for efficient mask prediction.\n          prompt_encoder (PromptEncoder): Encodes various types of input prompts.\n          mask_decoder (MaskDecoder): Predicts masks from the image embeddings\n            and encoded prompts.\n          pixel_mean (list(float)): Mean values for normalizing pixels in the input image.\n          pixel_std (list(float)): Std values for normalizing pixels in the input image.\n        \"\"\"\n        super().__init__()\n        self.image_encoder = image_encoder\n        self.prompt_encoder = prompt_encoder\n        self.mask_decoder = mask_decoder\n        self.register_buffer(\"pixel_mean\", torch.Tensor(pixel_mean).view(-1, 1, 1), False)\n        self.register_buffer(\"pixel_std\", torch.Tensor(pixel_std).view(-1, 1, 1), False)\n\n    @property\n    def device(self) -> Any:\n        return self.pixel_mean.device\n\n    @torch.no_grad()\n    def forward(\n        self,\n        batched_input: List[Dict[str, Any]],\n        multimask_output: bool,\n    ) -> List[Dict[str, torch.Tensor]]:\n        \"\"\"\n        Predicts masks end-to-end from provided images and prompts.\n        If prompts are not known in advance, using SamPredictor is\n        recommended over calling the model directly.\n\n        Arguments:\n          batched_input (list(dict)): A list over input images, each a\n            dictionary with the following keys. A prompt key can be\n            excluded if it is not present.\n              'image': The image as a torch tensor in 3xHxW format,\n                already transformed for input to the model.\n              'original_size': (tuple(int, int)) The original size of\n                the image before transformation, as (H, W).\n              'point_coords': (torch.Tensor) Batched point prompts for\n                this image, with shape BxNx2. 
Already transformed to the\n                input frame of the model.\n              'point_labels': (torch.Tensor) Batched labels for point prompts,\n                with shape BxN.\n              'boxes': (torch.Tensor) Batched box inputs, with shape Bx4.\n                Already transformed to the input frame of the model.\n              'mask_inputs': (torch.Tensor) Batched mask inputs to the model,\n                in the form Bx1xHxW.\n          multimask_output (bool): Whether the model should predict multiple\n            disambiguating masks, or return a single mask.\n\n        Returns:\n          (list(dict)): A list over input images, where each element is\n            a dictionary with the following keys.\n              'masks': (torch.Tensor) Batched binary mask predictions,\n                with shape BxCxHxW, where B is the number of input prompts,\n                C is determined by multimask_output, and (H, W) is the\n                original size of the image.\n              'iou_predictions': (torch.Tensor) The model's predictions\n                of mask quality, in shape BxC.\n              'low_res_logits': (torch.Tensor) Low resolution logits with\n                shape BxCxHxW, where H=W=256. Can be passed as mask input\n                to subsequent iterations of prediction.\n        \"\"\"\n        input_images = torch.stack([self.preprocess(x[\"image\"]) for x in batched_input], dim=0)\n        image_embeddings = self.image_encoder(input_images)\n\n        outputs = []\n        for image_record, curr_embedding in zip(batched_input, image_embeddings):\n            if \"point_coords\" in image_record:\n                points = (image_record[\"point_coords\"], image_record[\"point_labels\"])\n            else:\n                points = None\n            sparse_embeddings, dense_embeddings = self.prompt_encoder(\n                points=points,\n                boxes=image_record.get(\"boxes\", None),\n                masks=image_record.get(\"mask_inputs\", None),\n            )\n            low_res_masks, iou_predictions = self.mask_decoder(\n                image_embeddings=curr_embedding.unsqueeze(0),\n                image_pe=self.prompt_encoder.get_dense_pe(),\n                sparse_prompt_embeddings=sparse_embeddings,\n                dense_prompt_embeddings=dense_embeddings,\n                multimask_output=multimask_output,\n            )\n            masks = self.postprocess_masks(\n                low_res_masks,\n                input_size=image_record[\"image\"].shape[-2:],\n                original_size=image_record[\"original_size\"],\n            )\n            masks = masks > self.mask_threshold\n            outputs.append(\n                {\n                    \"masks\": masks,\n                    \"iou_predictions\": iou_predictions,\n                    \"low_res_logits\": low_res_masks,\n                }\n            )\n        return outputs\n\n    def postprocess_masks(\n        self,\n        masks: torch.Tensor,\n        input_size: Tuple[int, ...],\n        original_size: Tuple[int, ...],\n    ) -> torch.Tensor:\n        \"\"\"\n        Remove padding and upscale masks to the original image size.\n\n        Arguments:\n          masks (torch.Tensor): Batched masks from the mask_decoder,\n            in BxCxHxW format.\n          input_size (tuple(int, int)): The size of the image input to the\n            model, in (H, W) format. 
Used to remove padding.\n          original_size (tuple(int, int)): The original size of the image\n            before resizing for input to the model, in (H, W) format.\n\n        Returns:\n          (torch.Tensor): Batched masks in BxCxHxW format, where (H, W)\n            is given by original_size.\n        \"\"\"\n        masks = F.interpolate(\n            masks,\n            (self.image_encoder.img_size, self.image_encoder.img_size),\n            mode=\"bilinear\",\n            align_corners=False,\n        )\n        masks = masks[..., : input_size[0], : input_size[1]]\n        masks = F.interpolate(masks, original_size, mode=\"bilinear\", align_corners=False)\n        return masks\n\n    def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n        \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n        # Normalize colors\n        x = (x - self.pixel_mean) / self.pixel_std\n\n        # Pad\n        h, w = x.shape[-2:]\n        padh = self.image_encoder.img_size - h\n        padw = self.image_encoder.img_size - w\n        x = F.pad(x, (0, padw, 0, padh))\n        return x\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/modeling/transformer.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nfrom torch import Tensor, nn\n\nimport math\nfrom typing import Tuple, Type\n\nfrom .common import MLPBlock\n\n\nclass TwoWayTransformer(nn.Module):\n    def __init__(\n        self,\n        depth: int,\n        embedding_dim: int,\n        num_heads: int,\n        mlp_dim: int,\n        activation: Type[nn.Module] = nn.ReLU,\n        attention_downsample_rate: int = 2,\n    ) -> None:\n        \"\"\"\n        A transformer decoder that attends to an input image using\n        queries whose positional embedding is supplied.\n\n        Args:\n          depth (int): number of layers in the transformer\n          embedding_dim (int): the channel dimension for the input embeddings\n          num_heads (int): the number of heads for multihead attention. Must\n            divide embedding_dim\n          mlp_dim (int): the channel dimension internal to the MLP block\n          activation (nn.Module): the activation to use in the MLP block\n        \"\"\"\n        super().__init__()\n        self.depth = depth\n        self.embedding_dim = embedding_dim\n        self.num_heads = num_heads\n        self.mlp_dim = mlp_dim\n        self.layers = nn.ModuleList()\n\n        for i in range(depth):\n            self.layers.append(\n                TwoWayAttentionBlock(\n                    embedding_dim=embedding_dim,\n                    num_heads=num_heads,\n                    mlp_dim=mlp_dim,\n                    activation=activation,\n                    attention_downsample_rate=attention_downsample_rate,\n                    skip_first_layer_pe=(i == 0),\n                )\n            )\n\n        self.final_attn_token_to_image = Attention(\n            embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n        )\n        self.norm_final_attn = nn.LayerNorm(embedding_dim)\n\n    def forward(\n        self,\n        image_embedding: Tensor,\n        image_pe: Tensor,\n        point_embedding: Tensor,\n    ) -> Tuple[Tensor, Tensor]:\n        \"\"\"\n        Args:\n          image_embedding (torch.Tensor): image to attend to. Should be shape\n            B x embedding_dim x h x w for any h and w.\n          image_pe (torch.Tensor): the positional encoding to add to the image. 
Must\n            have the same shape as image_embedding.\n          point_embedding (torch.Tensor): the embedding to add to the query points.\n            Must have shape B x N_points x embedding_dim for any N_points.\n\n        Returns:\n          torch.Tensor: the processed point_embedding\n          torch.Tensor: the processed image_embedding\n        \"\"\"\n        # BxCxHxW -> BxHWxC == B x N_image_tokens x C\n        bs, c, h, w = image_embedding.shape\n        image_embedding = image_embedding.flatten(2).permute(0, 2, 1)\n        image_pe = image_pe.flatten(2).permute(0, 2, 1)\n\n        # Prepare queries\n        queries = point_embedding\n        keys = image_embedding\n\n        # Apply transformer blocks and final layernorm\n        for layer in self.layers:\n            queries, keys = layer(\n                queries=queries,\n                keys=keys,\n                query_pe=point_embedding,\n                key_pe=image_pe,\n            )\n\n        # Apply the final attention layer from the points to the image\n        q = queries + point_embedding\n        k = keys + image_pe\n        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)\n        queries = queries + attn_out\n        queries = self.norm_final_attn(queries)\n\n        return queries, keys\n\n\nclass TwoWayAttentionBlock(nn.Module):\n    def __init__(\n        self,\n        embedding_dim: int,\n        num_heads: int,\n        mlp_dim: int = 2048,\n        activation: Type[nn.Module] = nn.ReLU,\n        attention_downsample_rate: int = 2,\n        skip_first_layer_pe: bool = False,\n    ) -> None:\n        \"\"\"\n        A transformer block with four layers: (1) self-attention of sparse\n        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp\n        block on sparse inputs, and (4) cross attention of dense inputs to sparse\n        inputs.\n\n        Arguments:\n          embedding_dim (int): the channel dimension of the embeddings\n          num_heads (int): the number of heads in the attention layers\n          mlp_dim (int): the hidden dimension of the mlp block\n          activation (nn.Module): the activation of the mlp block\n          skip_first_layer_pe (bool): skip the PE on the first layer\n        \"\"\"\n        super().__init__()\n        self.self_attn = Attention(embedding_dim, num_heads)\n        self.norm1 = nn.LayerNorm(embedding_dim)\n\n        self.cross_attn_token_to_image = Attention(\n            embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n        )\n        self.norm2 = nn.LayerNorm(embedding_dim)\n\n        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)\n        self.norm3 = nn.LayerNorm(embedding_dim)\n\n        self.norm4 = nn.LayerNorm(embedding_dim)\n        self.cross_attn_image_to_token = Attention(\n            embedding_dim, num_heads, downsample_rate=attention_downsample_rate\n        )\n\n        self.skip_first_layer_pe = skip_first_layer_pe\n\n    def forward(\n        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor\n    ) -> Tuple[Tensor, Tensor]:\n        # Self attention block\n        if self.skip_first_layer_pe:\n            queries = self.self_attn(q=queries, k=queries, v=queries)\n        else:\n            q = queries + query_pe\n            attn_out = self.self_attn(q=q, k=q, v=queries)\n            queries = queries + attn_out\n        queries = self.norm1(queries)\n\n        # Cross attention block, tokens attending to image embedding\n        q = queries + query_pe\n      
  k = keys + key_pe\n        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)\n        queries = queries + attn_out\n        queries = self.norm2(queries)\n\n        # MLP block\n        mlp_out = self.mlp(queries)\n        queries = queries + mlp_out\n        queries = self.norm3(queries)\n\n        # Cross attention block, image embedding attending to tokens\n        q = queries + query_pe\n        k = keys + key_pe\n        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)\n        keys = keys + attn_out\n        keys = self.norm4(keys)\n\n        return queries, keys\n\n\nclass Attention(nn.Module):\n    \"\"\"\n    An attention layer that allows for downscaling the size of the embedding\n    after projection to queries, keys, and values.\n    \"\"\"\n\n    def __init__(\n        self,\n        embedding_dim: int,\n        num_heads: int,\n        downsample_rate: int = 1,\n    ) -> None:\n        super().__init__()\n        self.embedding_dim = embedding_dim\n        self.internal_dim = embedding_dim // downsample_rate\n        self.num_heads = num_heads\n        assert self.internal_dim % num_heads == 0, \"num_heads must divide embedding_dim.\"\n\n        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)\n        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)\n        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)\n        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)\n\n    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:\n        b, n, c = x.shape\n        x = x.reshape(b, n, num_heads, c // num_heads)\n        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head\n\n    def _recombine_heads(self, x: Tensor) -> Tensor:\n        b, n_heads, n_tokens, c_per_head = x.shape\n        x = x.transpose(1, 2)\n        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C\n\n    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:\n        # Input projections\n        q = self.q_proj(q)\n        k = self.k_proj(k)\n        v = self.v_proj(v)\n\n        # Separate into heads\n        q = self._separate_heads(q, self.num_heads)\n        k = self._separate_heads(k, self.num_heads)\n        v = self._separate_heads(v, self.num_heads)\n\n        # Attention\n        _, _, _, c_per_head = q.shape\n        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens\n        attn = attn / math.sqrt(c_per_head)\n        attn = torch.softmax(attn, dim=-1)\n\n        # Get output\n        out = attn @ v\n        out = self._recombine_heads(out)\n        out = self.out_proj(out)\n\n        return out\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/predictor.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\n\nfrom .modeling import Sam\n\nfrom typing import Optional, Tuple\n\nfrom .utils.transforms import ResizeLongestSide\n\n\nclass SamPredictor(nn.Module):\n    def __init__(\n        self,\n        sam_model: Sam,\n    ) -> None:\n        \"\"\"\n        Uses SAM to calculate the image embedding for an image, and then\n        allow repeated, efficient mask prediction given prompts.\n\n        Arguments:\n          sam_model (Sam): The model to use for mask prediction.\n        \"\"\"\n        super().__init__()\n        self.model = sam_model\n        self.transform = ResizeLongestSide(sam_model.image_encoder.img_size)\n        self.reset_image()\n\n    def set_image(\n        self,\n        image: np.ndarray,\n        image_format: str = \"RGB\",\n    ) -> None:\n        \"\"\"\n        Calculates the image embeddings for the provided image, allowing\n        masks to be predicted with the 'predict' method.\n\n        Arguments:\n          image (np.ndarray): The image for calculating masks. Expects an\n            image in HWC uint8 format, with pixel values in [0, 255].\n          image_format (str): The color format of the image, in ['RGB', 'BGR'].\n        \"\"\"\n        assert image_format in [\n            \"RGB\",\n            \"BGR\",\n        ], f\"image_format must be in ['RGB', 'BGR'], is {image_format}.\"\n        if image_format != self.model.image_format:\n            image = image[..., ::-1]\n\n        # Transform the image to the form expected by the model\n        input_image = self.transform.apply_image(image)\n        input_image_torch = torch.as_tensor(input_image, device=self.device)\n        input_image_torch = input_image_torch.permute(2, 0, 1).contiguous()[None, :, :, :]\n\n        self.set_torch_image(input_image_torch, image.shape[:2])\n\n    @torch.no_grad()\n    def set_torch_image(\n        self,\n        transformed_image: torch.Tensor,\n        original_image_size: Tuple[int, ...],\n    ) -> None:\n        \"\"\"\n        Calculates the image embeddings for the provided image, allowing\n        masks to be predicted with the 'predict' method. 
Expects the input\n        image to be already transformed to the format expected by the model.\n\n        Arguments:\n          transformed_image (torch.Tensor): The input image, with shape\n            1x3xHxW, which has been transformed with ResizeLongestSide.\n          original_image_size (tuple(int, int)): The size of the image\n            before transformation, in (H, W) format.\n        \"\"\"\n        assert (\n            len(transformed_image.shape) == 4\n            and transformed_image.shape[1] == 3\n            and max(*transformed_image.shape[2:]) == self.model.image_encoder.img_size\n        ), f\"set_torch_image input must be BCHW with long side {self.model.image_encoder.img_size}.\"\n        self.reset_image()\n\n        self.original_size = original_image_size\n        self.input_size = tuple(transformed_image.shape[-2:])\n        input_image = self.model.preprocess(transformed_image)\n        self.features = self.model.image_encoder(input_image)\n        self.is_image_set = True\n\n    def predict(\n        self,\n        point_coords: Optional[np.ndarray] = None,\n        point_labels: Optional[np.ndarray] = None,\n        box: Optional[np.ndarray] = None,\n        mask_input: Optional[np.ndarray] = None,\n        multimask_output: bool = True,\n        return_logits: bool = False,\n    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Predict masks for the given input prompts, using the currently set image.\n\n        Arguments:\n          point_coords (np.ndarray or None): A Nx2 array of point prompts to the\n            model. Each point is in (X,Y) in pixels.\n          point_labels (np.ndarray or None): A length N array of labels for the\n            point prompts. 1 indicates a foreground point and 0 indicates a\n            background point.\n          box (np.ndarray or None): A length 4 array given a box prompt to the\n            model, in XYXY format.\n          mask_input (np.ndarray): A low resolution mask input to the model, typically\n            coming from a previous prediction iteration. Has form 1xHxW, where\n            for SAM, H=W=256.\n          multimask_output (bool): If true, the model will return three masks.\n            For ambiguous input prompts (such as a single click), this will often\n            produce better masks than a single prediction. If only a single\n            mask is needed, the model's predicted quality score can be used\n            to select the best mask. For non-ambiguous prompts, such as multiple\n            input prompts, multimask_output=False can give better results.\n          return_logits (bool): If true, returns un-thresholded masks logits\n            instead of a binary mask.\n\n        Returns:\n          (np.ndarray): The output masks in CxHxW format, where C is the\n            number of masks, and (H, W) is the original image size.\n          (np.ndarray): An array of length C containing the model's\n            predictions for the quality of each mask.\n          (np.ndarray): An array of shape CxHxW, where C is the number\n            of masks and H=W=256. These low resolution logits can be passed to\n            a subsequent iteration as mask input.\n        \"\"\"\n        if not self.is_image_set:\n            raise RuntimeError(\"An image must be set with .set_image(...) 
before mask prediction.\")\n\n        # Transform input prompts\n        coords_torch, labels_torch, box_torch, mask_input_torch = None, None, None, None\n        if point_coords is not None:\n            assert (\n                point_labels is not None\n            ), \"point_labels must be supplied if point_coords is supplied.\"\n            point_coords = self.transform.apply_coords(point_coords, self.original_size)\n            coords_torch = torch.as_tensor(point_coords, dtype=torch.float, device=self.device)\n            labels_torch = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)\n            coords_torch, labels_torch = coords_torch[None, :, :], labels_torch[None, :]\n        if box is not None:\n            box = self.transform.apply_boxes(box, self.original_size)\n            box_torch = torch.as_tensor(box, dtype=torch.float, device=self.device)\n            box_torch = box_torch[None, :]\n        if mask_input is not None:\n            mask_input_torch = torch.as_tensor(mask_input, dtype=torch.float, device=self.device)\n            mask_input_torch = mask_input_torch[None, :, :, :]\n\n        masks, iou_predictions, low_res_masks = self.predict_torch(\n            coords_torch,\n            labels_torch,\n            box_torch,\n            mask_input_torch,\n            multimask_output,\n            return_logits=return_logits,\n        )\n\n        masks = masks[0].detach().cpu().numpy()\n        iou_predictions = iou_predictions[0].detach().cpu().numpy()\n        low_res_masks = low_res_masks[0].detach().cpu().numpy()\n        return masks, iou_predictions, low_res_masks\n\n    @torch.no_grad()\n    def predict_torch(\n        self,\n        point_coords: Optional[torch.Tensor],\n        point_labels: Optional[torch.Tensor],\n        boxes: Optional[torch.Tensor] = None,\n        mask_input: Optional[torch.Tensor] = None,\n        multimask_output: bool = True,\n        return_logits: bool = False,\n    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n        \"\"\"\n        Predict masks for the given input prompts, using the currently set image.\n        Input prompts are batched torch tensors and are expected to already be\n        transformed to the input frame using ResizeLongestSide.\n\n        Arguments:\n          point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the\n            model. Each point is in (X,Y) in pixels.\n          point_labels (torch.Tensor or None): A BxN array of labels for the\n            point prompts. 1 indicates a foreground point and 0 indicates a\n            background point.\n          box (np.ndarray or None): A Bx4 array given a box prompt to the\n            model, in XYXY format.\n          mask_input (np.ndarray): A low resolution mask input to the model, typically\n            coming from a previous prediction iteration. Has form Bx1xHxW, where\n            for SAM, H=W=256. Masks returned by a previous iteration of the\n            predict method do not need further transformation.\n          multimask_output (bool): If true, the model will return three masks.\n            For ambiguous input prompts (such as a single click), this will often\n            produce better masks than a single prediction. If only a single\n            mask is needed, the model's predicted quality score can be used\n            to select the best mask. 
For non-ambiguous prompts, such as multiple\n            input prompts, multimask_output=False can give better results.\n          return_logits (bool): If true, returns un-thresholded masks logits\n            instead of a binary mask.\n\n        Returns:\n          (torch.Tensor): The output masks in BxCxHxW format, where C is the\n            number of masks, and (H, W) is the original image size.\n          (torch.Tensor): An array of shape BxC containing the model's\n            predictions for the quality of each mask.\n          (torch.Tensor): An array of shape BxCxHxW, where C is the number\n            of masks and H=W=256. These low res logits can be passed to\n            a subsequent iteration as mask input.\n        \"\"\"\n        if not self.is_image_set:\n            raise RuntimeError(\"An image must be set with .set_image(...) before mask prediction.\")\n\n        if point_coords is not None:\n            points = (point_coords, point_labels)\n        else:\n            points = None\n\n        # Embed prompts\n        sparse_embeddings, dense_embeddings = self.model.prompt_encoder(\n            points=points,\n            boxes=boxes,\n            masks=mask_input,\n        )\n\n        # Predict masks\n        low_res_masks, iou_predictions = self.model.mask_decoder(\n            image_embeddings=self.features,\n            image_pe=self.model.prompt_encoder.get_dense_pe(),\n            sparse_prompt_embeddings=sparse_embeddings,\n            dense_prompt_embeddings=dense_embeddings,\n            multimask_output=multimask_output,\n        )\n\n        # Upscale the masks to the original image resolution\n        masks = self.model.postprocess_masks(low_res_masks, self.input_size, self.original_size)\n\n        if not return_logits:\n            masks = masks > self.model.mask_threshold\n\n        return masks, iou_predictions, low_res_masks\n\n    def get_image_embedding(self) -> torch.Tensor:\n        \"\"\"\n        Returns the image embeddings for the currently set image, with\n        shape 1xCxHxW, where C is the embedding dimension and (H,W) are\n        the embedding spatial dimension of SAM (typically C=256, H=W=64).\n        \"\"\"\n        if not self.is_image_set:\n            raise RuntimeError(\n                \"An image must be set with .set_image(...) to generate an embedding.\"\n            )\n        assert self.features is not None, \"Features must exist if an image has been set.\"\n        return self.features\n\n    @property\n    def device(self) -> torch.device:\n        return self.model.device\n\n    def reset_image(self) -> None:\n        \"\"\"Resets the currently set image.\"\"\"\n        self.is_image_set = False\n        self.features = None\n        self.orig_h = None\n        self.orig_w = None\n        self.input_h = None\n        self.input_w = None\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/utils/__init__.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/utils/amg.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\n\nimport math\nfrom copy import deepcopy\nfrom itertools import product\nfrom typing import Any, Dict, Generator, ItemsView, List, Tuple\n\n\nclass MaskData:\n    \"\"\"\n    A structure for storing masks and their related data in batched format.\n    Implements basic filtering and concatenation.\n    \"\"\"\n\n    def __init__(self, **kwargs) -> None:\n        for v in kwargs.values():\n            assert isinstance(\n                v, (list, np.ndarray, torch.Tensor)\n            ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n        self._stats = dict(**kwargs)\n\n    def __setitem__(self, key: str, item: Any) -> None:\n        assert isinstance(\n            item, (list, np.ndarray, torch.Tensor)\n        ), \"MaskData only supports list, numpy arrays, and torch tensors.\"\n        self._stats[key] = item\n\n    def __delitem__(self, key: str) -> None:\n        del self._stats[key]\n\n    def __getitem__(self, key: str) -> Any:\n        return self._stats[key]\n\n    def items(self) -> ItemsView[str, Any]:\n        return self._stats.items()\n\n    def filter(self, keep: torch.Tensor) -> None:\n        for k, v in self._stats.items():\n            if v is None:\n                self._stats[k] = None\n            elif isinstance(v, torch.Tensor):\n                self._stats[k] = v[torch.as_tensor(keep, device=v.device)]\n            elif isinstance(v, np.ndarray):\n                self._stats[k] = v[keep.detach().cpu().numpy()]\n            elif isinstance(v, list) and keep.dtype == torch.bool:\n                self._stats[k] = [a for i, a in enumerate(v) if keep[i]]\n            elif isinstance(v, list):\n                self._stats[k] = [v[i] for i in keep]\n            else:\n                raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n    def cat(self, new_stats: \"MaskData\") -> None:\n        for k, v in new_stats.items():\n            if k not in self._stats or self._stats[k] is None:\n                self._stats[k] = deepcopy(v)\n            elif isinstance(v, torch.Tensor):\n                self._stats[k] = torch.cat([self._stats[k], v], dim=0)\n            elif isinstance(v, np.ndarray):\n                self._stats[k] = np.concatenate([self._stats[k], v], axis=0)\n            elif isinstance(v, list):\n                self._stats[k] = self._stats[k] + deepcopy(v)\n            else:\n                raise TypeError(f\"MaskData key {k} has an unsupported type {type(v)}.\")\n\n    def to_numpy(self) -> None:\n        for k, v in self._stats.items():\n            if isinstance(v, torch.Tensor):\n                self._stats[k] = v.detach().cpu().numpy()\n\n\ndef is_box_near_crop_edge(\n    boxes: torch.Tensor, crop_box: List[int], orig_box: List[int], atol: float = 20.0\n) -> torch.Tensor:\n    \"\"\"Filter masks at the edge of a crop, but not at the edge of the original image.\"\"\"\n    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)\n    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)\n    boxes = uncrop_boxes_xyxy(boxes, crop_box).float()\n    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)\n    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, 
rtol=0)\n    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)\n    return torch.any(near_crop_edge, dim=1)\n\n\ndef box_xyxy_to_xywh(box_xyxy: torch.Tensor) -> torch.Tensor:\n    box_xywh = deepcopy(box_xyxy)\n    box_xywh[2] = box_xywh[2] - box_xywh[0]\n    box_xywh[3] = box_xywh[3] - box_xywh[1]\n    return box_xywh\n\n\ndef batch_iterator(batch_size: int, *args) -> Generator[List[Any], None, None]:\n    assert len(args) > 0 and all(\n        len(a) == len(args[0]) for a in args\n    ), \"Batched iteration must have inputs of all the same size.\"\n    n_batches = len(args[0]) // batch_size + int(len(args[0]) % batch_size != 0)\n    for b in range(n_batches):\n        yield [arg[b * batch_size : (b + 1) * batch_size] for arg in args]\n\n\ndef mask_to_rle_pytorch(tensor: torch.Tensor) -> List[Dict[str, Any]]:\n    \"\"\"\n    Encodes masks to an uncompressed RLE, in the format expected by\n    pycoco tools.\n    \"\"\"\n    # Put in fortran order and flatten h,w\n    b, h, w = tensor.shape\n    tensor = tensor.permute(0, 2, 1).flatten(1)\n\n    # Compute change indices\n    diff = tensor[:, 1:] ^ tensor[:, :-1]\n    change_indices = diff.nonzero()\n\n    # Encode run length\n    out = []\n    for i in range(b):\n        cur_idxs = change_indices[change_indices[:, 0] == i, 1]\n        cur_idxs = torch.cat(\n            [\n                torch.tensor([0], dtype=cur_idxs.dtype, device=cur_idxs.device),\n                cur_idxs + 1,\n                torch.tensor([h * w], dtype=cur_idxs.dtype, device=cur_idxs.device),\n            ]\n        )\n        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]\n        counts = [] if tensor[i, 0] == 0 else [0]\n        counts.extend(btw_idxs.detach().cpu().tolist())\n        out.append({\"size\": [h, w], \"counts\": counts})\n    return out\n\n\ndef rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:\n    \"\"\"Compute a binary mask from an uncompressed RLE.\"\"\"\n    h, w = rle[\"size\"]\n    mask = np.empty(h * w, dtype=bool)\n    idx = 0\n    parity = False\n    for count in rle[\"counts\"]:\n        mask[idx : idx + count] = parity\n        idx += count\n        parity ^= True\n    mask = mask.reshape(w, h)\n    return mask.transpose()  # Put in C order\n\n\ndef area_from_rle(rle: Dict[str, Any]) -> int:\n    return sum(rle[\"counts\"][1::2])\n\n\ndef calculate_stability_score(\n    masks: torch.Tensor, mask_threshold: float, threshold_offset: float\n) -> torch.Tensor:\n    \"\"\"\n    Computes the stability score for a batch of masks. 
The stability\n    score is the IoU between the binary masks obtained by thresholding\n    the predicted mask logits at high and low values.\n    \"\"\"\n    # One mask is always contained inside the other.\n    # Save memory by preventing unnecessary cast to torch.int64\n    intersections = (\n        (masks > (mask_threshold + threshold_offset))\n        .sum(-1, dtype=torch.int16)\n        .sum(-1, dtype=torch.int32)\n    )\n    unions = (\n        (masks > (mask_threshold - threshold_offset))\n        .sum(-1, dtype=torch.int16)\n        .sum(-1, dtype=torch.int32)\n    )\n    return intersections / unions\n\n\ndef build_point_grid(n_per_side: int) -> np.ndarray:\n    \"\"\"Generates a 2D grid of points evenly spaced in [0,1]x[0,1].\"\"\"\n    offset = 1 / (2 * n_per_side)\n    points_one_side = np.linspace(offset, 1 - offset, n_per_side)\n    points_x = np.tile(points_one_side[None, :], (n_per_side, 1))\n    points_y = np.tile(points_one_side[:, None], (1, n_per_side))\n    points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)\n    return points\n\n\ndef build_all_layer_point_grids(\n    n_per_side: int, n_layers: int, scale_per_layer: int\n) -> List[np.ndarray]:\n    \"\"\"Generates point grids for all crop layers.\"\"\"\n    points_by_layer = []\n    for i in range(n_layers + 1):\n        n_points = int(n_per_side / (scale_per_layer**i))\n        points_by_layer.append(build_point_grid(n_points))\n    return points_by_layer\n\n\ndef generate_crop_boxes(\n    im_size: Tuple[int, ...], n_layers: int, overlap_ratio: float\n) -> Tuple[List[List[int]], List[int]]:\n    \"\"\"\n    Generates a list of crop boxes of different sizes. Each layer\n    has (2**i)**2 boxes for the ith layer.\n    \"\"\"\n    crop_boxes, layer_idxs = [], []\n    im_h, im_w = im_size\n    short_side = min(im_h, im_w)\n\n    # Original image\n    crop_boxes.append([0, 0, im_w, im_h])\n    layer_idxs.append(0)\n\n    def crop_len(orig_len, n_crops, overlap):\n        return int(math.ceil((overlap * (n_crops - 1) + orig_len) / n_crops))\n\n    for i_layer in range(n_layers):\n        n_crops_per_side = 2 ** (i_layer + 1)\n        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))\n\n        crop_w = crop_len(im_w, n_crops_per_side, overlap)\n        crop_h = crop_len(im_h, n_crops_per_side, overlap)\n\n        crop_box_x0 = [int((crop_w - overlap) * i) for i in range(n_crops_per_side)]\n        crop_box_y0 = [int((crop_h - overlap) * i) for i in range(n_crops_per_side)]\n\n        # Crops in XYXY format\n        for x0, y0 in product(crop_box_x0, crop_box_y0):\n            box = [x0, y0, min(x0 + crop_w, im_w), min(y0 + crop_h, im_h)]\n            crop_boxes.append(box)\n            layer_idxs.append(i_layer + 1)\n\n    return crop_boxes, layer_idxs\n\n\ndef uncrop_boxes_xyxy(boxes: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n    x0, y0, _, _ = crop_box\n    offset = torch.tensor([[x0, y0, x0, y0]], device=boxes.device)\n    # Check if boxes has a channel dimension\n    if len(boxes.shape) == 3:\n        offset = offset.unsqueeze(1)\n    return boxes + offset\n\n\ndef uncrop_points(points: torch.Tensor, crop_box: List[int]) -> torch.Tensor:\n    x0, y0, _, _ = crop_box\n    offset = torch.tensor([[x0, y0]], device=points.device)\n    # Check if points has a channel dimension\n    if len(points.shape) == 3:\n        offset = offset.unsqueeze(1)\n    return points + offset\n\n\ndef uncrop_masks(\n    masks: torch.Tensor, crop_box: List[int], orig_h: int, orig_w: int\n) -> 
torch.Tensor:\n    x0, y0, x1, y1 = crop_box\n    if x0 == 0 and y0 == 0 and x1 == orig_w and y1 == orig_h:\n        return masks\n    # Coordinate transform masks\n    pad_x, pad_y = orig_w - (x1 - x0), orig_h - (y1 - y0)\n    pad = (x0, pad_x - x0, y0, pad_y - y0)\n    return torch.nn.functional.pad(masks, pad, value=0)\n\n\ndef remove_small_regions(\n    mask: np.ndarray, area_thresh: float, mode: str\n) -> Tuple[np.ndarray, bool]:\n    \"\"\"\n    Removes small disconnected regions and holes in a mask. Returns the\n    mask and an indicator of if the mask has been modified.\n    \"\"\"\n    import cv2  # type: ignore\n\n    assert mode in [\"holes\", \"islands\"]\n    correct_holes = mode == \"holes\"\n    working_mask = (correct_holes ^ mask).astype(np.uint8)\n    n_labels, regions, stats, _ = cv2.connectedComponentsWithStats(working_mask, 8)\n    sizes = stats[:, -1][1:]  # Row 0 is background label\n    small_regions = [i + 1 for i, s in enumerate(sizes) if s < area_thresh]\n    if len(small_regions) == 0:\n        return mask, False\n    fill_labels = [0] + small_regions\n    if not correct_holes:\n        fill_labels = [i for i in range(n_labels) if i not in fill_labels]\n        # If every region is below threshold, keep largest\n        if len(fill_labels) == 0:\n            fill_labels = [int(np.argmax(sizes)) + 1]\n    mask = np.isin(regions, fill_labels)\n    return mask, True\n\n\ndef coco_encode_rle(uncompressed_rle: Dict[str, Any]) -> Dict[str, Any]:\n    from pycocotools import mask as mask_utils  # type: ignore\n\n    h, w = uncompressed_rle[\"size\"]\n    rle = mask_utils.frPyObjects(uncompressed_rle, h, w)\n    rle[\"counts\"] = rle[\"counts\"].decode(\"utf-8\")  # Necessary to serialize with json\n    return rle\n\n\ndef batched_mask_to_box(masks: torch.Tensor) -> torch.Tensor:\n    \"\"\"\n    Calculates boxes in XYXY format around masks. Return [0,0,0,0] for\n    an empty mask. For input shape C1xC2x...xHxW, the output shape is C1xC2x...x4.\n    \"\"\"\n    # torch.max below raises an error on empty inputs, just skip in this case\n    if torch.numel(masks) == 0:\n        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)\n\n    # Normalize shape to CxHxW\n    shape = masks.shape\n    h, w = shape[-2:]\n    if len(shape) > 2:\n        masks = masks.flatten(0, -3)\n    else:\n        masks = masks.unsqueeze(0)\n\n    # Get top and bottom edges\n    in_height, _ = torch.max(masks, dim=-1)\n    in_height_coords = in_height * torch.arange(h, device=in_height.device)[None, :]\n    bottom_edges, _ = torch.max(in_height_coords, dim=-1)\n    in_height_coords = in_height_coords + h * (~in_height)\n    top_edges, _ = torch.min(in_height_coords, dim=-1)\n\n    # Get left and right edges\n    in_width, _ = torch.max(masks, dim=-2)\n    in_width_coords = in_width * torch.arange(w, device=in_width.device)[None, :]\n    right_edges, _ = torch.max(in_width_coords, dim=-1)\n    in_width_coords = in_width_coords + w * (~in_width)\n    left_edges, _ = torch.min(in_width_coords, dim=-1)\n\n    # If the mask is empty the right edge will be to the left of the left edge.\n    # Replace these boxes with [0, 0, 0, 0]\n    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)\n    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)\n    out = out * (~empty_filter).unsqueeze(-1)\n\n    # Return to original shape\n    if len(shape) > 2:\n        out = out.reshape(*shape[:-2], 4)\n    else:\n        out = out[0]\n\n    return out\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/utils/onnx.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nfrom typing import Tuple\n\nfrom ..modeling import Sam\nfrom .amg import calculate_stability_score\n\n\nclass SamOnnxModel(nn.Module):\n    \"\"\"\n    This model should not be called directly, but is used in ONNX export.\n    It combines the prompt encoder, mask decoder, and mask postprocessing of Sam,\n    with some functions modified to enable model tracing. Also supports extra\n    options controlling what information. See the ONNX export script for details.\n    \"\"\"\n\n    def __init__(\n        self,\n        model: Sam,\n        return_single_mask: bool,\n        use_stability_score: bool = False,\n        return_extra_metrics: bool = False,\n    ) -> None:\n        super().__init__()\n        self.mask_decoder = model.mask_decoder\n        self.model = model\n        self.img_size = model.image_encoder.img_size\n        self.return_single_mask = return_single_mask\n        self.use_stability_score = use_stability_score\n        self.stability_score_offset = 1.0\n        self.return_extra_metrics = return_extra_metrics\n\n    @staticmethod\n    def resize_longest_image_size(\n        input_image_size: torch.Tensor, longest_side: int\n    ) -> torch.Tensor:\n        input_image_size = input_image_size.to(torch.float32)\n        scale = longest_side / torch.max(input_image_size)\n        transformed_size = scale * input_image_size\n        transformed_size = torch.floor(transformed_size + 0.5).to(torch.int64)\n        return transformed_size\n\n    def _embed_points(self, point_coords: torch.Tensor, point_labels: torch.Tensor) -> torch.Tensor:\n        point_coords = point_coords + 0.5\n        point_coords = point_coords / self.img_size\n        point_embedding = self.model.prompt_encoder.pe_layer._pe_encoding(point_coords)\n        point_labels = point_labels.unsqueeze(-1).expand_as(point_embedding)\n\n        point_embedding = point_embedding * (point_labels != -1)\n        point_embedding = point_embedding + self.model.prompt_encoder.not_a_point_embed.weight * (\n            point_labels == -1\n        )\n\n        for i in range(self.model.prompt_encoder.num_point_embeddings):\n            point_embedding = point_embedding + self.model.prompt_encoder.point_embeddings[\n                i\n            ].weight * (point_labels == i)\n\n        return point_embedding\n\n    def _embed_masks(self, input_mask: torch.Tensor, has_mask_input: torch.Tensor) -> torch.Tensor:\n        mask_embedding = has_mask_input * self.model.prompt_encoder.mask_downscaling(input_mask)\n        mask_embedding = mask_embedding + (\n            1 - has_mask_input\n        ) * self.model.prompt_encoder.no_mask_embed.weight.reshape(1, -1, 1, 1)\n        return mask_embedding\n\n    def mask_postprocessing(self, masks: torch.Tensor, orig_im_size: torch.Tensor) -> torch.Tensor:\n        masks = F.interpolate(\n            masks,\n            size=(self.img_size, self.img_size),\n            mode=\"bilinear\",\n            align_corners=False,\n        )\n\n        prepadded_size = self.resize_longest_image_size(orig_im_size, self.img_size)\n        masks = masks[..., : int(prepadded_size[0]), : int(prepadded_size[1])]\n\n        orig_im_size = orig_im_size.to(torch.int64)\n        h, w = orig_im_size[0], 
orig_im_size[1]\n        masks = F.interpolate(masks, size=(h, w), mode=\"bilinear\", align_corners=False)\n        return masks\n\n    def select_masks(\n        self, masks: torch.Tensor, iou_preds: torch.Tensor, num_points: int\n    ) -> Tuple[torch.Tensor, torch.Tensor]:\n        # Determine if we should return the multiclick mask or not from the number of points.\n        # The reweighting is used to avoid control flow.\n        score_reweight = torch.tensor(\n            [[1000] + [0] * (self.model.mask_decoder.num_mask_tokens - 1)]\n        ).to(iou_preds.device)\n        score = iou_preds + (num_points - 2.5) * score_reweight\n        best_idx = torch.argmax(score, dim=1)\n        masks = masks[torch.arange(masks.shape[0]), best_idx, :, :].unsqueeze(1)\n        iou_preds = iou_preds[torch.arange(masks.shape[0]), best_idx].unsqueeze(1)\n\n        return masks, iou_preds\n\n    @torch.no_grad()\n    def forward(\n        self,\n        image_embeddings: torch.Tensor,\n        point_coords: torch.Tensor,\n        point_labels: torch.Tensor,\n        mask_input: torch.Tensor,\n        has_mask_input: torch.Tensor,\n        orig_im_size: torch.Tensor,\n    ):\n        sparse_embedding = self._embed_points(point_coords, point_labels)\n        dense_embedding = self._embed_masks(mask_input, has_mask_input)\n\n        masks, scores = self.model.mask_decoder.predict_masks(\n            image_embeddings=image_embeddings,\n            image_pe=self.model.prompt_encoder.get_dense_pe(),\n            sparse_prompt_embeddings=sparse_embedding,\n            dense_prompt_embeddings=dense_embedding,\n        )\n\n        if self.use_stability_score:\n            scores = calculate_stability_score(\n                masks, self.model.mask_threshold, self.stability_score_offset\n            )\n\n        if self.return_single_mask:\n            masks, scores = self.select_masks(masks, scores, point_coords.shape[1])\n\n        upscaled_masks = self.mask_postprocessing(masks, orig_im_size)\n\n        if self.return_extra_metrics:\n            stability_scores = calculate_stability_score(\n                upscaled_masks, self.model.mask_threshold, self.stability_score_offset\n            )\n            areas = (upscaled_masks > self.model.mask_threshold).sum(-1).sum(-1)\n            return upscaled_masks, scores, stability_scores, areas, masks\n\n        return upscaled_masks, scores, masks\n"
  },
  {
    "path": "projects/instance_segment_anything/models/segment_anything/utils/transforms.py",
    "content": "# Copyright (c) Meta Platforms, Inc. and affiliates.\n# All rights reserved.\n\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport numpy as np\nimport torch\nfrom torch.nn import functional as F\nfrom torchvision.transforms.functional import resize, to_pil_image  # type: ignore\n\nfrom copy import deepcopy\nfrom typing import Tuple\n\n\nclass ResizeLongestSide:\n    \"\"\"\n    Resizes images to longest side 'target_length', as well as provides\n    methods for resizing coordinates and boxes. Provides methods for\n    transforming both numpy array and batched torch tensors.\n    \"\"\"\n\n    def __init__(self, target_length: int) -> None:\n        self.target_length = target_length\n\n    def apply_image(self, image: np.ndarray) -> np.ndarray:\n        \"\"\"\n        Expects a numpy array with shape HxWxC in uint8 format.\n        \"\"\"\n        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)\n        return np.array(resize(to_pil_image(image), target_size))\n\n    def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:\n        \"\"\"\n        Expects a numpy array of length 2 in the final dimension. Requires the\n        original image size in (H, W) format.\n        \"\"\"\n        old_h, old_w = original_size\n        new_h, new_w = self.get_preprocess_shape(\n            original_size[0], original_size[1], self.target_length\n        )\n        coords = deepcopy(coords).astype(float)\n        coords[..., 0] = coords[..., 0] * (new_w / old_w)\n        coords[..., 1] = coords[..., 1] * (new_h / old_h)\n        return coords\n\n    def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:\n        \"\"\"\n        Expects a numpy array shape Bx4. Requires the original image size\n        in (H, W) format.\n        \"\"\"\n        boxes = self.apply_coords(boxes.reshape(-1, 2, 2), original_size)\n        return boxes.reshape(-1, 4)\n\n    def apply_image_torch(self, image: torch.Tensor) -> torch.Tensor:\n        \"\"\"\n        Expects batched images with shape BxCxHxW and float format. This\n        transformation may not exactly match apply_image. apply_image is\n        the transformation expected by the model.\n        \"\"\"\n        # Expects an image in BCHW format. May not exactly match apply_image.\n        target_size = self.get_preprocess_shape(image.shape[0], image.shape[1], self.target_length)\n        return F.interpolate(\n            image, target_size, mode=\"bilinear\", align_corners=False, antialias=True\n        )\n\n    def apply_coords_torch(\n        self, coords: torch.Tensor, original_size: Tuple[int, ...]\n    ) -> torch.Tensor:\n        \"\"\"\n        Expects a torch tensor with length 2 in the last dimension. Requires the\n        original image size in (H, W) format.\n        \"\"\"\n        old_h, old_w = original_size\n        new_h, new_w = self.get_preprocess_shape(\n            original_size[0], original_size[1], self.target_length\n        )\n        coords = deepcopy(coords).to(torch.float)\n        coords[..., 0] = coords[..., 0] * (new_w / old_w)\n        coords[..., 1] = coords[..., 1] * (new_h / old_h)\n        return coords\n\n    def apply_boxes_torch(\n        self, boxes: torch.Tensor, original_size: Tuple[int, ...]\n    ) -> torch.Tensor:\n        \"\"\"\n        Expects a torch tensor with shape Bx4. 
Requires the original image\n        size in (H, W) format.\n        \"\"\"\n        boxes = self.apply_coords_torch(boxes.reshape(-1, 2, 2), original_size)\n        return boxes.reshape(-1, 4)\n\n    @staticmethod\n    def get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:\n        \"\"\"\n        Compute the output size given input size and target long side length.\n        \"\"\"\n        scale = long_side_length * 1.0 / max(oldh, oldw)\n        newh, neww = oldh * scale, oldw * scale\n        neww = int(neww + 0.5)\n        newh = int(newh + 0.5)\n        return (newh, neww)\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/functions/__init__.py",
    "content": "# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nfrom .ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch\n\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/functions/ms_deform_attn_func.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport torch\nimport torch.nn.functional as F\nfrom torch.autograd import Function\nfrom torch.autograd.function import once_differentiable\n\ntry:\n    import MultiScaleDeformableAttention as MSDA\nexcept:\n    pass\n\nclass MSDeformAttnFunction(Function):\n    @staticmethod\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        ctx,\n        value,\n        value_spatial_shapes,\n        value_level_start_index,\n        sampling_locations,\n        attention_weights,\n        im2col_step,\n    ):\n        ctx.im2col_step = im2col_step\n        output = MSDA.ms_deform_attn_forward(\n            value,\n            value_spatial_shapes,\n            value_level_start_index,\n            sampling_locations,\n            attention_weights,\n            ctx.im2col_step,\n        )\n        ctx.save_for_backward(\n            value,\n            value_spatial_shapes,\n            value_level_start_index,\n            sampling_locations,\n            attention_weights,\n        )\n        return output\n\n    @staticmethod\n    @once_differentiable\n    @torch.cuda.amp.custom_bwd\n    def backward(ctx, grad_output):\n        (\n            value,\n            value_spatial_shapes,\n            value_level_start_index,\n            sampling_locations,\n            attention_weights,\n        ) = ctx.saved_tensors\n        grad_value, grad_sampling_loc, grad_attn_weight = MSDA.ms_deform_attn_backward(\n            value,\n            value_spatial_shapes,\n            value_level_start_index,\n            sampling_locations,\n            attention_weights,\n            grad_output,\n            ctx.im2col_step,\n        )\n\n        return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None\n\n\ndef ms_deform_attn_core_pytorch(\n    value, value_spatial_shapes, sampling_locations, attention_weights\n):\n    # for debug and test only,\n    # need to use cuda version instead\n    N_, S_, M_, D_ = value.shape\n    _, Lq_, M_, L_, P_, _ = sampling_locations.shape\n    value_list = value.split([H_ * W_ for H_, W_ in value_spatial_shapes], dim=1)\n    sampling_grids = 2 * sampling_locations - 1\n    sampling_value_list = []\n    for lid_, (H_, W_) in enumerate(value_spatial_shapes):\n        # N_, H_*W_, M_, D_ -> N_, H_*W_, M_*D_ -> N_, M_*D_, H_*W_ -> N_*M_, D_, H_, W_\n        value_l_ = (\n            value_list[lid_].flatten(2).transpose(1, 2).reshape(N_ * M_, D_, H_, W_)\n        )\n        # N_, Lq_, M_, P_, 2 -> N_, M_, Lq_, P_, 2 -> N_*M_, Lq_, P_, 2\n        sampling_grid_l_ = sampling_grids[:, :, :, lid_].transpose(1, 
2).flatten(0, 1)\n        # N_*M_, D_, Lq_, P_\n        sampling_value_l_ = F.grid_sample(\n            value_l_,\n            sampling_grid_l_,\n            mode=\"bilinear\",\n            padding_mode=\"zeros\",\n            align_corners=False,\n        )\n        sampling_value_list.append(sampling_value_l_)\n    # (N_, Lq_, M_, L_, P_) -> (N_, M_, Lq_, L_, P_) -> (N_, M_, 1, Lq_, L_*P_)\n    attention_weights = attention_weights.transpose(1, 2).reshape(\n        N_ * M_, 1, Lq_, L_ * P_\n    )\n    output = (\n        (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights)\n        .sum(-1)\n        .view(N_, M_ * D_, Lq_)\n    )\n    return output.transpose(1, 2).contiguous()\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/make.sh",
    "content": "#!/usr/bin/env bash\n# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\npython setup.py build install\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/modules/__init__.py",
    "content": "# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nfrom .ms_deform_attn import MSDeformAttn\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/modules/ms_deform_attn.py",
    "content": "# ------------------------------------------------------------------------\n# H-DETR\n# Copyright (c) 2022 Peking University & Microsoft Research Asia. All Rights Reserved.\n# Licensed under the MIT-style license found in the LICENSE file in the root directory\n# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport warnings\nimport math\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.nn.init import xavier_uniform_, constant_\n\nfrom mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE\n\nfrom ..functions import MSDeformAttnFunction, ms_deform_attn_core_pytorch\n\n\ndef _is_power_of_2(n):\n    if (not isinstance(n, int)) or (n < 0):\n        raise ValueError(\n            \"invalid input for _is_power_of_2: {} (type: {})\".format(n, type(n))\n        )\n    return (n & (n - 1) == 0) and n != 0\n\n\nclass MSDeformAttn(nn.Module):\n    def __init__(self, d_model=256, n_levels=4, n_heads=8, n_points=4):\n        \"\"\"\n        Multi-Scale Deformable Attention Module\n        :param d_model      hidden dimension\n        :param n_levels     number of feature levels\n        :param n_heads      number of attention heads\n        :param n_points     number of sampling points per attention head per feature level\n        \"\"\"\n        super().__init__()\n        if d_model % n_heads != 0:\n            raise ValueError(\n                \"d_model must be divisible by n_heads, but got {} and {}\".format(\n                    d_model, n_heads\n                )\n            )\n        _d_per_head = d_model // n_heads\n        # you'd better set _d_per_head to a power of 2 which is more efficient in our CUDA implementation\n        if not _is_power_of_2(_d_per_head):\n            warnings.warn(\n                \"You'd better set d_model in MSDeformAttn to make the dimension of each attention head a power of 2 \"\n                \"which is more efficient in our CUDA implementation.\"\n            )\n\n        self.im2col_step = 64\n\n        self.d_model = d_model\n        self.n_levels = n_levels\n        self.n_heads = n_heads\n        self.n_points = n_points\n\n        self.sampling_offsets = nn.Linear(d_model, n_heads * n_levels * n_points * 2)\n        self.attention_weights = nn.Linear(d_model, n_heads * n_levels * n_points)\n        self.value_proj = nn.Linear(d_model, d_model)\n        self.output_proj = nn.Linear(d_model, d_model)\n\n        self._reset_parameters()\n\n    def _reset_parameters(self):\n        constant_(self.sampling_offsets.weight.data, 0.0)\n        thetas = torch.arange(self.n_heads, dtype=torch.float32) * (\n            2.0 * math.pi / self.n_heads\n        )\n        grid_init = torch.stack([thetas.cos(), thetas.sin()], -1)\n        grid_init = (\n            (grid_init / grid_init.abs().max(-1, keepdim=True)[0])\n            .view(self.n_heads, 1, 1, 2)\n            .repeat(1, self.n_levels, self.n_points, 
1)\n        )\n        for i in range(self.n_points):\n            grid_init[:, :, i, :] *= i + 1\n        with torch.no_grad():\n            self.sampling_offsets.bias.data = grid_init.view(-1)\n        constant_(self.attention_weights.weight.data, 0.0)\n        constant_(self.attention_weights.bias.data, 0.0)\n        xavier_uniform_(self.value_proj.weight.data)\n        constant_(self.value_proj.bias.data, 0.0)\n        xavier_uniform_(self.output_proj.weight.data)\n        constant_(self.output_proj.bias.data, 0.0)\n\n    @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)\n    def forward(\n        self,\n        query,\n        reference_points,\n        input_flatten,\n        input_spatial_shapes,\n        input_level_start_index,\n        input_padding_mask=None,\n    ):\n        \"\"\"\n        :param query                       (N, Length_{query}, C)\n        :param reference_points            (N, Length_{query}, n_levels, 2), range in [0, 1], top-left (0,0), bottom-right (1, 1), including padding area\n                                        or (N, Length_{query}, n_levels, 4), add additional (w, h) to form reference boxes\n        :param input_flatten               (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l, C)\n        :param input_spatial_shapes        (n_levels, 2), [(H_0, W_0), (H_1, W_1), ..., (H_{L-1}, W_{L-1})]\n        :param input_level_start_index     (n_levels, ), [0, H_0*W_0, H_0*W_0+H_1*W_1, H_0*W_0+H_1*W_1+H_2*W_2, ..., H_0*W_0+H_1*W_1+...+H_{L-1}*W_{L-1}]\n        :param input_padding_mask          (N, \\sum_{l=0}^{L-1} H_l \\cdot W_l), True for padding elements, False for non-padding elements\n\n        :return output                     (N, Length_{query}, C)\n        \"\"\"\n        N, Len_q, _ = query.shape\n        N, Len_in, _ = input_flatten.shape\n        assert (input_spatial_shapes[:, 0] * input_spatial_shapes[:, 1]).sum() == Len_in\n\n        value = self.value_proj(input_flatten)\n        if input_padding_mask is not None:\n            value = value.masked_fill(input_padding_mask[..., None], float(0))\n        value = value.view(N, Len_in, self.n_heads, self.d_model // self.n_heads)\n        sampling_offsets = self.sampling_offsets(query).view(\n            N, Len_q, self.n_heads, self.n_levels, self.n_points, 2\n        )\n        attention_weights = self.attention_weights(query).view(\n            N, Len_q, self.n_heads, self.n_levels * self.n_points\n        )\n        attention_weights = F.softmax(attention_weights, -1).view(\n            N, Len_q, self.n_heads, self.n_levels, self.n_points\n        )\n        # N, Len_q, n_heads, n_levels, n_points, 2\n        if reference_points.shape[-1] == 2:\n            offset_normalizer = torch.stack(\n                [input_spatial_shapes[..., 1], input_spatial_shapes[..., 0]], -1\n            )\n            sampling_locations = (\n                reference_points[:, :, None, :, None, :]\n                + sampling_offsets / offset_normalizer[None, None, None, :, None, :]\n            )\n        elif reference_points.shape[-1] == 4:\n            sampling_locations = (\n                reference_points[:, :, None, :, None, :2]\n                + sampling_offsets\n                / self.n_points\n                * reference_points[:, :, None, :, None, 2:]\n                * 0.5\n            )\n        else:\n            raise ValueError(\n                \"Last dim of reference_points must be 2 or 4, but get {} instead.\".format(\n                    reference_points.shape[-1]\n                )\n            
)\n        if ((IS_CUDA_AVAILABLE and value.is_cuda)\n                or (IS_MLU_AVAILABLE and value.is_mlu)):\n            output = MSDeformAttnFunction.apply(\n                value,\n                input_spatial_shapes,\n                input_level_start_index,\n                sampling_locations,\n                attention_weights,\n                self.im2col_step,\n            )\n        else:\n            output = ms_deform_attn_core_pytorch(value,\n                                                 input_spatial_shapes,\n                                                 sampling_locations,\n                                                 attention_weights)\n        output = self.output_proj(output)\n        return output\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/setup.py",
    "content": "# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nimport os\nimport glob\n\nimport torch\n\nfrom torch.utils.cpp_extension import CUDA_HOME\nfrom torch.utils.cpp_extension import CppExtension\nfrom torch.utils.cpp_extension import CUDAExtension\n\nfrom setuptools import find_packages\nfrom setuptools import setup\n\nrequirements = [\"torch\", \"torchvision\"]\n\ndef get_extensions():\n    this_dir = os.path.dirname(os.path.abspath(__file__))\n    extensions_dir = os.path.join(this_dir, \"src\")\n\n    main_file = glob.glob(os.path.join(extensions_dir, \"*.cpp\"))\n    source_cpu = glob.glob(os.path.join(extensions_dir, \"cpu\", \"*.cpp\"))\n    source_cuda = glob.glob(os.path.join(extensions_dir, \"cuda\", \"*.cu\"))\n\n    sources = main_file + source_cpu\n    extension = CppExtension\n    extra_compile_args = {\"cxx\": []}\n    define_macros = []\n\n    if torch.cuda.is_available() and CUDA_HOME is not None:\n        extension = CUDAExtension\n        sources += source_cuda\n        define_macros += [(\"WITH_CUDA\", None)]\n        extra_compile_args[\"nvcc\"] = [\n            \"-DCUDA_HAS_FP16=1\",\n            \"-D__CUDA_NO_HALF_OPERATORS__\",\n            \"-D__CUDA_NO_HALF_CONVERSIONS__\",\n            \"-D__CUDA_NO_HALF2_OPERATORS__\",\n        ]\n    else:\n        raise NotImplementedError('Cuda is not availabel')\n\n    sources = [os.path.join(extensions_dir, s) for s in sources]\n    include_dirs = [extensions_dir]\n    ext_modules = [\n        extension(\n            \"MultiScaleDeformableAttention\",\n            sources,\n            include_dirs=include_dirs,\n            define_macros=define_macros,\n            extra_compile_args=extra_compile_args,\n        )\n    ]\n    return ext_modules\n\nsetup(\n    name=\"MultiScaleDeformableAttention\",\n    version=\"1.0\",\n    author=\"Weijie Su\",\n    url=\"https://github.com/fundamentalvision/Deformable-DETR\",\n    description=\"PyTorch Wrapper for CUDA Functions of Multi-Scale Deformable Attention\",\n    packages=find_packages(exclude=(\"configs\", \"tests\",)),\n    ext_modules=get_extensions(),\n    cmdclass={\"build_ext\": torch.utils.cpp_extension.BuildExtension},\n)\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/cpu/ms_deform_attn_cpu.cpp",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#include <vector>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n\nat::Tensor\nms_deform_attn_cpu_forward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const int im2col_step)\n{\n    AT_ERROR(\"Not implement on cpu\");\n}\n\nstd::vector<at::Tensor>\nms_deform_attn_cpu_backward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const at::Tensor &grad_output,\n    const int im2col_step)\n{\n    AT_ERROR(\"Not implement on cpu\");\n}\n\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/cpu/ms_deform_attn_cpu.h",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#pragma once\n#include <torch/extension.h>\n\nat::Tensor\nms_deform_attn_cpu_forward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const int im2col_step);\n\nstd::vector<at::Tensor>\nms_deform_attn_cpu_backward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const at::Tensor &grad_output,\n    const int im2col_step);\n\n\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/cuda/ms_deform_attn_cuda.cu",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#include <vector>\n#include \"cuda/ms_deform_im2col_cuda.cuh\"\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n#include <cuda.h>\n#include <cuda_runtime.h>\n\n\nat::Tensor ms_deform_attn_cuda_forward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const int im2col_step)\n{\n    AT_ASSERTM(value.is_contiguous(), \"value tensor has to be contiguous\");\n    AT_ASSERTM(spatial_shapes.is_contiguous(), \"spatial_shapes tensor has to be contiguous\");\n    AT_ASSERTM(level_start_index.is_contiguous(), \"level_start_index tensor has to be contiguous\");\n    AT_ASSERTM(sampling_loc.is_contiguous(), \"sampling_loc tensor has to be contiguous\");\n    AT_ASSERTM(attn_weight.is_contiguous(), \"attn_weight tensor has to be contiguous\");\n\n    AT_ASSERTM(value.type().is_cuda(), \"value must be a CUDA tensor\");\n    AT_ASSERTM(spatial_shapes.type().is_cuda(), \"spatial_shapes must be a CUDA tensor\");\n    AT_ASSERTM(level_start_index.type().is_cuda(), \"level_start_index must be a CUDA tensor\");\n    AT_ASSERTM(sampling_loc.type().is_cuda(), \"sampling_loc must be a CUDA tensor\");\n    AT_ASSERTM(attn_weight.type().is_cuda(), \"attn_weight must be a CUDA tensor\");\n\n    const int batch = value.size(0);\n    const int spatial_size = value.size(1);\n    const int num_heads = value.size(2);\n    const int channels = value.size(3);\n\n    const int num_levels = spatial_shapes.size(0);\n\n    const int num_query = sampling_loc.size(1);\n    const int num_point = sampling_loc.size(4);\n\n    const int im2col_step_ = std::min(batch, im2col_step);\n\n    AT_ASSERTM(batch % im2col_step_ == 0, \"batch(%d) must divide im2col_step(%d)\", batch, im2col_step_);\n    \n    auto output = at::zeros({batch, num_query, num_heads, channels}, value.options());\n\n    const int batch_n = im2col_step_;\n    auto output_n = output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});\n    auto per_value_size = spatial_size * num_heads * channels;\n    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;\n    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;\n    for (int n = 0; n < batch/im2col_step_; ++n)\n    {\n        auto columns = output_n.select(0, n);\n        AT_DISPATCH_FLOATING_TYPES(value.type(), \"ms_deform_attn_forward_cuda\", ([&] {\n            ms_deformable_im2col_cuda(at::cuda::getCurrentCUDAStream(),\n                value.data<scalar_t>() + n * im2col_step_ * per_value_size,\n                spatial_shapes.data<int64_t>(),\n                level_start_index.data<int64_t>(),\n                sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,\n                attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,\n                batch_n, spatial_size, num_heads, channels, 
num_levels, num_query, num_point,\n                columns.data<scalar_t>());\n\n        }));\n    }\n\n    output = output.view({batch, num_query, num_heads*channels});\n\n    return output;\n}\n\n\nstd::vector<at::Tensor> ms_deform_attn_cuda_backward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const at::Tensor &grad_output,\n    const int im2col_step)\n{\n\n    AT_ASSERTM(value.is_contiguous(), \"value tensor has to be contiguous\");\n    AT_ASSERTM(spatial_shapes.is_contiguous(), \"spatial_shapes tensor has to be contiguous\");\n    AT_ASSERTM(level_start_index.is_contiguous(), \"level_start_index tensor has to be contiguous\");\n    AT_ASSERTM(sampling_loc.is_contiguous(), \"sampling_loc tensor has to be contiguous\");\n    AT_ASSERTM(attn_weight.is_contiguous(), \"attn_weight tensor has to be contiguous\");\n    AT_ASSERTM(grad_output.is_contiguous(), \"grad_output tensor has to be contiguous\");\n\n    AT_ASSERTM(value.type().is_cuda(), \"value must be a CUDA tensor\");\n    AT_ASSERTM(spatial_shapes.type().is_cuda(), \"spatial_shapes must be a CUDA tensor\");\n    AT_ASSERTM(level_start_index.type().is_cuda(), \"level_start_index must be a CUDA tensor\");\n    AT_ASSERTM(sampling_loc.type().is_cuda(), \"sampling_loc must be a CUDA tensor\");\n    AT_ASSERTM(attn_weight.type().is_cuda(), \"attn_weight must be a CUDA tensor\");\n    AT_ASSERTM(grad_output.type().is_cuda(), \"grad_output must be a CUDA tensor\");\n\n    const int batch = value.size(0);\n    const int spatial_size = value.size(1);\n    const int num_heads = value.size(2);\n    const int channels = value.size(3);\n\n    const int num_levels = spatial_shapes.size(0);\n\n    const int num_query = sampling_loc.size(1);\n    const int num_point = sampling_loc.size(4);\n\n    const int im2col_step_ = std::min(batch, im2col_step);\n\n    AT_ASSERTM(batch % im2col_step_ == 0, \"batch(%d) must divide im2col_step(%d)\", batch, im2col_step_);\n\n    auto grad_value = at::zeros_like(value);\n    auto grad_sampling_loc = at::zeros_like(sampling_loc);\n    auto grad_attn_weight = at::zeros_like(attn_weight);\n\n    const int batch_n = im2col_step_;\n    auto per_value_size = spatial_size * num_heads * channels;\n    auto per_sample_loc_size = num_query * num_heads * num_levels * num_point * 2;\n    auto per_attn_weight_size = num_query * num_heads * num_levels * num_point;\n    auto grad_output_n = grad_output.view({batch/im2col_step_, batch_n, num_query, num_heads, channels});\n    \n    for (int n = 0; n < batch/im2col_step_; ++n)\n    {\n        auto grad_output_g = grad_output_n.select(0, n);\n        AT_DISPATCH_FLOATING_TYPES(value.type(), \"ms_deform_attn_backward_cuda\", ([&] {\n            ms_deformable_col2im_cuda(at::cuda::getCurrentCUDAStream(),\n                                    grad_output_g.data<scalar_t>(),\n                                    value.data<scalar_t>() + n * im2col_step_ * per_value_size,\n                                    spatial_shapes.data<int64_t>(),\n                                    level_start_index.data<int64_t>(),\n                                    sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,\n                                    attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size,\n                                    batch_n, spatial_size, num_heads, channels, num_levels, num_query, num_point,\n    
                                grad_value.data<scalar_t>() +  n * im2col_step_ * per_value_size,\n                                    grad_sampling_loc.data<scalar_t>() + n * im2col_step_ * per_sample_loc_size,\n                                    grad_attn_weight.data<scalar_t>() + n * im2col_step_ * per_attn_weight_size);\n\n        }));\n    }\n\n    return {\n        grad_value, grad_sampling_loc, grad_attn_weight\n    };\n}"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/cuda/ms_deform_attn_cuda.h",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#pragma once\n#include <torch/extension.h>\n\nat::Tensor ms_deform_attn_cuda_forward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const int im2col_step);\n\nstd::vector<at::Tensor> ms_deform_attn_cuda_backward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const at::Tensor &grad_output,\n    const int im2col_step);\n\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/cuda/ms_deform_im2col_cuda.cuh",
    "content": "/*!\n**************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************\n* Modified from DCN (https://github.com/msracver/Deformable-ConvNets)\n* Copyright (c) 2018 Microsoft\n**************************************************************************\n*/\n\n#include <cstdio>\n#include <algorithm>\n#include <cstring>\n\n#include <ATen/ATen.h>\n#include <ATen/cuda/CUDAContext.h>\n\n#include <THC/THCAtomics.cuh>\n\n#define CUDA_KERNEL_LOOP(i, n)                          \\\n  for (int i = blockIdx.x * blockDim.x + threadIdx.x;   \\\n      i < (n);                                          \\\n      i += blockDim.x * gridDim.x)\n\nconst int CUDA_NUM_THREADS = 1024;\ninline int GET_BLOCKS(const int N, const int num_threads)\n{\n  return (N + num_threads - 1) / num_threads;\n}\n\n\ntemplate <typename scalar_t>\n__device__ scalar_t ms_deform_attn_im2col_bilinear(const scalar_t* &bottom_data, \n                                                   const int &height, const int &width, const int &nheads, const int &channels,\n                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c)\n{\n  const int h_low = floor(h);\n  const int w_low = floor(w);\n  const int h_high = h_low + 1;\n  const int w_high = w_low + 1;\n\n  const scalar_t lh = h - h_low;\n  const scalar_t lw = w - w_low;\n  const scalar_t hh = 1 - lh, hw = 1 - lw;\n\n  const int w_stride = nheads * channels;\n  const int h_stride = width * w_stride;\n  const int h_low_ptr_offset = h_low * h_stride;\n  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;\n  const int w_low_ptr_offset = w_low * w_stride;\n  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;\n  const int base_ptr = m * channels + c;\n\n  scalar_t v1 = 0;\n  if (h_low >= 0 && w_low >= 0)\n  {\n    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;\n    v1 = bottom_data[ptr1];\n  }\n  scalar_t v2 = 0;\n  if (h_low >= 0 && w_high <= width - 1)\n  {\n    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;\n    v2 = bottom_data[ptr2];\n  }\n  scalar_t v3 = 0;\n  if (h_high <= height - 1 && w_low >= 0)\n  {\n    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;\n    v3 = bottom_data[ptr3];\n  }\n  scalar_t v4 = 0;\n  if (h_high <= height - 1 && w_high <= width - 1)\n  {\n    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;\n    v4 = bottom_data[ptr4];\n  }\n\n  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;\n\n  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);\n  return val;\n}\n\n\ntemplate <typename scalar_t>\n__device__ void ms_deform_attn_col2im_bilinear(const scalar_t* &bottom_data, \n                                                   const int &height, const int &width, const int &nheads, const int &channels,\n                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c,\n                                                   const scalar_t &top_grad,\n                                                   const scalar_t &attn_weight,\n                                                   scalar_t* &grad_value, \n                                                   scalar_t* grad_sampling_loc,\n                                 
                  scalar_t* grad_attn_weight)\n{\n  const int h_low = floor(h);\n  const int w_low = floor(w);\n  const int h_high = h_low + 1;\n  const int w_high = w_low + 1;\n\n  const scalar_t lh = h - h_low;\n  const scalar_t lw = w - w_low;\n  const scalar_t hh = 1 - lh, hw = 1 - lw;\n\n  const int w_stride = nheads * channels;\n  const int h_stride = width * w_stride;\n  const int h_low_ptr_offset = h_low * h_stride;\n  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;\n  const int w_low_ptr_offset = w_low * w_stride;\n  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;\n  const int base_ptr = m * channels + c;\n\n  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;\n  const scalar_t top_grad_value = top_grad * attn_weight;\n  scalar_t grad_h_weight = 0, grad_w_weight = 0;\n\n  scalar_t v1 = 0;\n  if (h_low >= 0 && w_low >= 0)\n  {\n    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;\n    v1 = bottom_data[ptr1];\n    grad_h_weight -= hw * v1;\n    grad_w_weight -= hh * v1;\n    atomicAdd(grad_value+ptr1, w1*top_grad_value);\n  }\n  scalar_t v2 = 0;\n  if (h_low >= 0 && w_high <= width - 1)\n  {\n    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;\n    v2 = bottom_data[ptr2];\n    grad_h_weight -= lw * v2;\n    grad_w_weight += hh * v2;\n    atomicAdd(grad_value+ptr2, w2*top_grad_value);\n  }\n  scalar_t v3 = 0;\n  if (h_high <= height - 1 && w_low >= 0)\n  {\n    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;\n    v3 = bottom_data[ptr3];\n    grad_h_weight += hw * v3;\n    grad_w_weight -= lh * v3;\n    atomicAdd(grad_value+ptr3, w3*top_grad_value); \n  }\n  scalar_t v4 = 0;\n  if (h_high <= height - 1 && w_high <= width - 1)\n  {\n    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;\n    v4 = bottom_data[ptr4];\n    grad_h_weight += lw * v4;\n    grad_w_weight += lh * v4;\n    atomicAdd(grad_value+ptr4, w4*top_grad_value);\n  }\n\n  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);\n  *grad_attn_weight = top_grad * val;\n  *grad_sampling_loc = width * grad_w_weight * top_grad_value;\n  *(grad_sampling_loc + 1) = height * grad_h_weight * top_grad_value;\n}\n\n\ntemplate <typename scalar_t>\n__device__ void ms_deform_attn_col2im_bilinear_gm(const scalar_t* &bottom_data, \n                                                   const int &height, const int &width, const int &nheads, const int &channels,\n                                                   const scalar_t &h, const scalar_t &w, const int &m, const int &c,\n                                                   const scalar_t &top_grad,\n                                                   const scalar_t &attn_weight,\n                                                   scalar_t* &grad_value, \n                                                   scalar_t* grad_sampling_loc,\n                                                   scalar_t* grad_attn_weight)\n{\n  const int h_low = floor(h);\n  const int w_low = floor(w);\n  const int h_high = h_low + 1;\n  const int w_high = w_low + 1;\n\n  const scalar_t lh = h - h_low;\n  const scalar_t lw = w - w_low;\n  const scalar_t hh = 1 - lh, hw = 1 - lw;\n\n  const int w_stride = nheads * channels;\n  const int h_stride = width * w_stride;\n  const int h_low_ptr_offset = h_low * h_stride;\n  const int h_high_ptr_offset = h_low_ptr_offset + h_stride;\n  const int w_low_ptr_offset = w_low * w_stride;\n  const int w_high_ptr_offset = w_low_ptr_offset + w_stride;\n  const int 
base_ptr = m * channels + c;\n\n  const scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw;\n  const scalar_t top_grad_value = top_grad * attn_weight;\n  scalar_t grad_h_weight = 0, grad_w_weight = 0;\n\n  scalar_t v1 = 0;\n  if (h_low >= 0 && w_low >= 0)\n  {\n    const int ptr1 = h_low_ptr_offset + w_low_ptr_offset + base_ptr;\n    v1 = bottom_data[ptr1];\n    grad_h_weight -= hw * v1;\n    grad_w_weight -= hh * v1;\n    atomicAdd(grad_value+ptr1, w1*top_grad_value);\n  }\n  scalar_t v2 = 0;\n  if (h_low >= 0 && w_high <= width - 1)\n  {\n    const int ptr2 = h_low_ptr_offset + w_high_ptr_offset + base_ptr;\n    v2 = bottom_data[ptr2];\n    grad_h_weight -= lw * v2;\n    grad_w_weight += hh * v2;\n    atomicAdd(grad_value+ptr2, w2*top_grad_value);\n  }\n  scalar_t v3 = 0;\n  if (h_high <= height - 1 && w_low >= 0)\n  {\n    const int ptr3 = h_high_ptr_offset + w_low_ptr_offset + base_ptr;\n    v3 = bottom_data[ptr3];\n    grad_h_weight += hw * v3;\n    grad_w_weight -= lh * v3;\n    atomicAdd(grad_value+ptr3, w3*top_grad_value); \n  }\n  scalar_t v4 = 0;\n  if (h_high <= height - 1 && w_high <= width - 1)\n  {\n    const int ptr4 = h_high_ptr_offset + w_high_ptr_offset + base_ptr;\n    v4 = bottom_data[ptr4];\n    grad_h_weight += lw * v4;\n    grad_w_weight += lh * v4;\n    atomicAdd(grad_value+ptr4, w4*top_grad_value);\n  }\n\n  const scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);\n  atomicAdd(grad_attn_weight, top_grad * val); \n  atomicAdd(grad_sampling_loc, width * grad_w_weight * top_grad_value);\n  atomicAdd(grad_sampling_loc + 1, height * grad_h_weight * top_grad_value);\n}\n\n\ntemplate <typename scalar_t>\n__global__ void ms_deformable_im2col_gpu_kernel(const int n,\n                                                const scalar_t *data_value, \n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *data_col)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    scalar_t *data_col_ptr = data_col + index;\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n    scalar_t col = 0;\n    \n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int 
spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const scalar_t *data_value_ptr = data_value + (data_value_ptr_init_offset + level_start_id * qid_stride);\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          col += ms_deform_attn_im2col_bilinear(data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col) * weight;\n        }\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n      }\n    }\n    *data_col_ptr = col;\n  }\n}\n\ntemplate <typename scalar_t, unsigned int blockSize>\n__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1(const int n,\n                                                const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    __shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];\n    __shared__ scalar_t cache_grad_attn_weight[blockSize];\n    unsigned int tid = threadIdx.x;\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int spatial_h = 
data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;\n        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;\n        *(cache_grad_attn_weight+threadIdx.x)=0;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,\n            top_grad, weight, grad_value_ptr, \n            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);\n        }\n        \n        __syncthreads();\n        if (tid == 0)\n        {\n          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];\n          int sid=2;\n          for (unsigned int tid = 1; tid < blockSize; ++tid)\n          {\n            _grad_w += cache_grad_sampling_loc[sid];\n            _grad_h += cache_grad_sampling_loc[sid + 1];\n            _grad_a += cache_grad_attn_weight[tid];\n            sid += 2;\n          }\n          \n          \n          *grad_sampling_loc = _grad_w;\n          *(grad_sampling_loc + 1) = _grad_h;\n          *grad_attn_weight = _grad_a;\n        }\n        __syncthreads();\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\n\ntemplate <typename scalar_t, unsigned int blockSize>\n__global__ void ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2(const int n,\n                                                const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    
__shared__ scalar_t cache_grad_sampling_loc[blockSize * 2];\n    __shared__ scalar_t cache_grad_attn_weight[blockSize];\n    unsigned int tid = threadIdx.x;\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;\n        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;\n        *(cache_grad_attn_weight+threadIdx.x)=0;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,\n            top_grad, weight, grad_value_ptr, \n            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);\n        }\n        \n        __syncthreads();\n\n        for (unsigned int s=blockSize/2; s>0; s>>=1)\n        {\n          if (tid < s) {\n            const unsigned int xid1 = tid << 1;\n            const unsigned int xid2 = (tid + s) << 1;\n            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];\n            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];\n            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];\n          }\n          __syncthreads();\n        }\n\n        if (tid == 0)\n        { \n          *grad_sampling_loc = cache_grad_sampling_loc[0];\n          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];\n          *grad_attn_weight = cache_grad_attn_weight[0];\n        }\n        __syncthreads();\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\n\ntemplate <typename scalar_t>\n__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v1(const int n,\n                                  
              const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    extern __shared__ int _s[];\n    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;\n    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;\n    unsigned int tid = threadIdx.x;\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;\n        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;\n        *(cache_grad_attn_weight+threadIdx.x)=0;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, 
c_col,\n            top_grad, weight, grad_value_ptr, \n            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);\n        }\n        \n        __syncthreads();\n        if (tid == 0)\n        {\n          scalar_t _grad_w=cache_grad_sampling_loc[0], _grad_h=cache_grad_sampling_loc[1], _grad_a=cache_grad_attn_weight[0];\n          int sid=2;\n          for (unsigned int tid = 1; tid < blockDim.x; ++tid)\n          {\n            _grad_w += cache_grad_sampling_loc[sid];\n            _grad_h += cache_grad_sampling_loc[sid + 1];\n            _grad_a += cache_grad_attn_weight[tid];\n            sid += 2;\n          }\n          \n          \n          *grad_sampling_loc = _grad_w;\n          *(grad_sampling_loc + 1) = _grad_h;\n          *grad_attn_weight = _grad_a;\n        }\n        __syncthreads();\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\ntemplate <typename scalar_t>\n__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2(const int n,\n                                                const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    extern __shared__ int _s[];\n    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;\n    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;\n    unsigned int tid = threadIdx.x;\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int 
spatial_h_ptr = l_col << 1;\n      const int spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;\n        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;\n        *(cache_grad_attn_weight+threadIdx.x)=0;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,\n            top_grad, weight, grad_value_ptr, \n            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);\n        }\n        \n        __syncthreads();\n\n        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)\n        {\n          if (tid < s) {\n            const unsigned int xid1 = tid << 1;\n            const unsigned int xid2 = (tid + s) << 1;\n            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];\n            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];\n            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];\n            if (tid + (s << 1) < spre)\n            {\n              cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];\n              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];\n              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];\n            } \n          }\n          __syncthreads();\n        }\n\n        if (tid == 0)\n        {\n          *grad_sampling_loc = cache_grad_sampling_loc[0];\n          *(grad_sampling_loc + 1) = cache_grad_sampling_loc[1];\n          *grad_attn_weight = cache_grad_attn_weight[0];\n        }\n        __syncthreads();\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\ntemplate <typename scalar_t>\n__global__ void ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks(const int n,\n                                                const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                
const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    extern __shared__ int _s[];\n    scalar_t* cache_grad_sampling_loc = (scalar_t*)_s;\n    scalar_t* cache_grad_attn_weight = cache_grad_sampling_loc + 2 * blockDim.x;\n    unsigned int tid = threadIdx.x;\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n      {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        *(cache_grad_sampling_loc+(threadIdx.x << 1)) = 0;\n        *(cache_grad_sampling_loc+((threadIdx.x << 1) + 1)) = 0;\n        *(cache_grad_attn_weight+threadIdx.x)=0;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,\n            top_grad, weight, grad_value_ptr, \n            cache_grad_sampling_loc+(threadIdx.x << 1), cache_grad_attn_weight+threadIdx.x);\n        }\n        \n        __syncthreads();\n\n        for (unsigned int s=blockDim.x/2, spre=blockDim.x; s>0; s>>=1, spre>>=1)\n        {\n          if (tid < s) {\n            const unsigned int xid1 = tid << 1;\n            const unsigned int xid2 = (tid + s) << 1;\n            cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + s];\n            cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2];\n            cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1];\n            if (tid + (s << 1) < spre)\n            {\n       
       cache_grad_attn_weight[tid] += cache_grad_attn_weight[tid + (s << 1)];\n              cache_grad_sampling_loc[xid1] += cache_grad_sampling_loc[xid2 + (s << 1)];\n              cache_grad_sampling_loc[xid1 + 1] += cache_grad_sampling_loc[xid2 + 1 + (s << 1)];\n            }\n          }\n          __syncthreads();\n        }\n\n        if (tid == 0)\n        {\n          atomicAdd(grad_sampling_loc, cache_grad_sampling_loc[0]);\n          atomicAdd(grad_sampling_loc + 1, cache_grad_sampling_loc[1]);\n          atomicAdd(grad_attn_weight, cache_grad_attn_weight[0]);\n        }\n        __syncthreads();\n\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\n\ntemplate <typename scalar_t>\n__global__ void ms_deformable_col2im_gpu_kernel_gm(const int n,\n                                                const scalar_t *grad_col,\n                                                const scalar_t *data_value,\n                                                const int64_t *data_spatial_shapes,\n                                                const int64_t *data_level_start_index, \n                                                const scalar_t *data_sampling_loc,\n                                                const scalar_t *data_attn_weight,\n                                                const int batch_size, \n                                                const int spatial_size, \n                                                const int num_heads,\n                                                const int channels, \n                                                const int num_levels,\n                                                const int num_query,\n                                                const int num_point,\n                                                scalar_t *grad_value,\n                                                scalar_t *grad_sampling_loc,\n                                                scalar_t *grad_attn_weight)\n{\n  CUDA_KERNEL_LOOP(index, n)\n  {\n    int _temp = index;\n    const int c_col = _temp % channels;\n    _temp /= channels;\n    const int sampling_index = _temp; \n    const int m_col = _temp % num_heads;\n    _temp /= num_heads;\n    const int q_col = _temp % num_query;\n    _temp /= num_query;\n    const int b_col = _temp;\n\n    const scalar_t top_grad = grad_col[index];\n\n    int data_weight_ptr = sampling_index * num_levels * num_point;\n    int data_loc_w_ptr = data_weight_ptr << 1;\n    const int grad_sampling_ptr = data_weight_ptr;\n    grad_sampling_loc += grad_sampling_ptr << 1;\n    grad_attn_weight += grad_sampling_ptr;\n    const int grad_weight_stride = 1;\n    const int grad_loc_stride = 2;\n    const int qid_stride = num_heads * channels;\n    const int data_value_ptr_init_offset = b_col * spatial_size * qid_stride;\n\n    for (int l_col=0; l_col < num_levels; ++l_col)\n    {\n      const int level_start_id = data_level_start_index[l_col];\n      const int spatial_h_ptr = l_col << 1;\n      const int spatial_h = data_spatial_shapes[spatial_h_ptr];\n      const int spatial_w = data_spatial_shapes[spatial_h_ptr + 1];\n      const int value_ptr_offset = data_value_ptr_init_offset + level_start_id * qid_stride;\n      const scalar_t *data_value_ptr = data_value + value_ptr_offset;\n      scalar_t *grad_value_ptr = grad_value + value_ptr_offset;\n\n      for (int p_col=0; p_col < num_point; ++p_col)\n  
    {\n        const scalar_t loc_w = data_sampling_loc[data_loc_w_ptr];\n        const scalar_t loc_h = data_sampling_loc[data_loc_w_ptr + 1];\n        const scalar_t weight = data_attn_weight[data_weight_ptr];\n\n        const scalar_t h_im = loc_h * spatial_h - 0.5;\n        const scalar_t w_im = loc_w * spatial_w - 0.5;\n        if (h_im > -1 && w_im > -1 && h_im < spatial_h && w_im < spatial_w)\n        {\n          ms_deform_attn_col2im_bilinear_gm(\n            data_value_ptr, spatial_h, spatial_w, num_heads, channels, h_im, w_im, m_col, c_col,\n            top_grad, weight, grad_value_ptr, \n            grad_sampling_loc, grad_attn_weight);\n        }\n        data_weight_ptr += 1;\n        data_loc_w_ptr += 2;\n        grad_attn_weight += grad_weight_stride;\n        grad_sampling_loc += grad_loc_stride;\n      }\n    }\n  }\n}\n\n\ntemplate <typename scalar_t>\nvoid ms_deformable_im2col_cuda(cudaStream_t stream,\n                              const scalar_t* data_value,\n                              const int64_t* data_spatial_shapes, \n                              const int64_t* data_level_start_index, \n                              const scalar_t* data_sampling_loc,\n                              const scalar_t* data_attn_weight,\n                              const int batch_size,\n                              const int spatial_size, \n                              const int num_heads, \n                              const int channels, \n                              const int num_levels, \n                              const int num_query,\n                              const int num_point,\n                              scalar_t* data_col)\n{\n  const int num_kernels = batch_size * num_query * num_heads * channels;\n  const int num_actual_kernels = batch_size * num_query * num_heads * channels;\n  const int num_threads = CUDA_NUM_THREADS;\n  ms_deformable_im2col_gpu_kernel<scalar_t>\n      <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n          0, stream>>>(\n      num_kernels, data_value, data_spatial_shapes, data_level_start_index, data_sampling_loc, data_attn_weight, \n      batch_size, spatial_size, num_heads, channels, num_levels, num_query, num_point, data_col);\n  \n  cudaError_t err = cudaGetLastError();\n  if (err != cudaSuccess)\n  {\n    printf(\"error in ms_deformable_im2col_cuda: %s\\n\", cudaGetErrorString(err));\n  }\n\n}\n\ntemplate <typename scalar_t>\nvoid ms_deformable_col2im_cuda(cudaStream_t stream,\n                              const scalar_t* grad_col,\n                              const scalar_t* data_value,\n                              const int64_t * data_spatial_shapes,\n                              const int64_t * data_level_start_index,\n                              const scalar_t * data_sampling_loc,\n                              const scalar_t * data_attn_weight,\n                              const int batch_size, \n                              const int spatial_size, \n                              const int num_heads,\n                              const int channels, \n                              const int num_levels,\n                              const int num_query,\n                              const int num_point, \n                              scalar_t* grad_value,\n                              scalar_t* grad_sampling_loc,\n                              scalar_t* grad_attn_weight)\n{\n  const int num_threads = (channels > CUDA_NUM_THREADS)?CUDA_NUM_THREADS:channels;\n  const int num_kernels = 
batch_size * num_query * num_heads * channels;\n  const int num_actual_kernels = batch_size * num_query * num_heads * channels;\n  if (channels > 1024)\n  {\n    if ((channels & 1023) == 0)\n    {\n      ms_deformable_col2im_gpu_kernel_shm_reduce_v2_multi_blocks<scalar_t>\n          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n              num_threads*3*sizeof(scalar_t), stream>>>(\n                        num_kernels, \n                        grad_col,\n                        data_value,\n                        data_spatial_shapes,\n                        data_level_start_index, \n                        data_sampling_loc,\n                        data_attn_weight,\n                        batch_size, \n                        spatial_size, \n                        num_heads,\n                        channels, \n                        num_levels,\n                        num_query,\n                        num_point,\n                        grad_value,\n                        grad_sampling_loc,\n                        grad_attn_weight);\n    }\n    else\n    {\n      ms_deformable_col2im_gpu_kernel_gm<scalar_t>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n    }\n  }\n  else{\n    switch(channels)\n    {\n      case 1:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 1>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 2:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 2>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      
grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 4:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 4>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 8:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 8>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 16:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 16>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 32:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v1<scalar_t, 32>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 
64:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 64>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 128:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 128>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 256:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 256>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 512:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 512>\n        <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      case 1024:\n        ms_deformable_col2im_gpu_kernel_shm_blocksize_aware_reduce_v2<scalar_t, 1024>\n        
<<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n            0, stream>>>(\n                      num_kernels, \n                      grad_col,\n                      data_value,\n                      data_spatial_shapes,\n                      data_level_start_index, \n                      data_sampling_loc,\n                      data_attn_weight,\n                      batch_size, \n                      spatial_size, \n                      num_heads,\n                      channels, \n                      num_levels,\n                      num_query,\n                      num_point,\n                      grad_value,\n                      grad_sampling_loc,\n                      grad_attn_weight);\n        break;\n      default:\n        if (channels < 64)\n        {\n          ms_deformable_col2im_gpu_kernel_shm_reduce_v1<scalar_t>\n          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n              num_threads*3*sizeof(scalar_t), stream>>>(\n                        num_kernels, \n                        grad_col,\n                        data_value,\n                        data_spatial_shapes,\n                        data_level_start_index, \n                        data_sampling_loc,\n                        data_attn_weight,\n                        batch_size, \n                        spatial_size, \n                        num_heads,\n                        channels, \n                        num_levels,\n                        num_query,\n                        num_point,\n                        grad_value,\n                        grad_sampling_loc,\n                        grad_attn_weight);\n        }\n        else\n        {\n          ms_deformable_col2im_gpu_kernel_shm_reduce_v2<scalar_t>\n          <<<GET_BLOCKS(num_actual_kernels, num_threads), num_threads,\n              num_threads*3*sizeof(scalar_t), stream>>>(\n                        num_kernels, \n                        grad_col,\n                        data_value,\n                        data_spatial_shapes,\n                        data_level_start_index, \n                        data_sampling_loc,\n                        data_attn_weight,\n                        batch_size, \n                        spatial_size, \n                        num_heads,\n                        channels, \n                        num_levels,\n                        num_query,\n                        num_point,\n                        grad_value,\n                        grad_sampling_loc,\n                        grad_attn_weight);\n        }\n    }\n  }\n  cudaError_t err = cudaGetLastError();\n  if (err != cudaSuccess)\n  {\n    printf(\"error in ms_deformable_col2im_cuda: %s\\n\", cudaGetErrorString(err));\n  }\n\n}"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/ms_deform_attn.h",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#pragma once\n\n#include \"cpu/ms_deform_attn_cpu.h\"\n\n#ifdef WITH_CUDA\n#include \"cuda/ms_deform_attn_cuda.h\"\n#endif\n\n\nat::Tensor\nms_deform_attn_forward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const int im2col_step)\n{\n    if (value.type().is_cuda())\n    {\n#ifdef WITH_CUDA\n        return ms_deform_attn_cuda_forward(\n            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, im2col_step);\n#else\n        AT_ERROR(\"Not compiled with GPU support\");\n#endif\n    }\n    AT_ERROR(\"Not implemented on the CPU\");\n}\n\nstd::vector<at::Tensor>\nms_deform_attn_backward(\n    const at::Tensor &value, \n    const at::Tensor &spatial_shapes,\n    const at::Tensor &level_start_index,\n    const at::Tensor &sampling_loc,\n    const at::Tensor &attn_weight,\n    const at::Tensor &grad_output,\n    const int im2col_step)\n{\n    if (value.type().is_cuda())\n    {\n#ifdef WITH_CUDA\n        return ms_deform_attn_cuda_backward(\n            value, spatial_shapes, level_start_index, sampling_loc, attn_weight, grad_output, im2col_step);\n#else\n        AT_ERROR(\"Not compiled with GPU support\");\n#endif\n    }\n    AT_ERROR(\"Not implemented on the CPU\");\n}\n\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/src/vision.cpp",
    "content": "/*!\n**************************************************************************************************\n* Deformable DETR\n* Copyright (c) 2020 SenseTime. All Rights Reserved.\n* Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n**************************************************************************************************\n* Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n**************************************************************************************************\n*/\n\n#include \"ms_deform_attn.h\"\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n  m.def(\"ms_deform_attn_forward\", &ms_deform_attn_forward, \"ms_deform_attn_forward\");\n  m.def(\"ms_deform_attn_backward\", &ms_deform_attn_backward, \"ms_deform_attn_backward\");\n}\n"
  },
  {
    "path": "projects/instance_segment_anything/ops/test.py",
    "content": "# ------------------------------------------------------------------------------------------------\n# Deformable DETR\n# Copyright (c) 2020 SenseTime. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 [see LICENSE for details]\n# ------------------------------------------------------------------------------------------------\n# Modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/tree/pytorch_1.0.0\n# ------------------------------------------------------------------------------------------------\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport time\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import gradcheck\n\nfrom functions.ms_deform_attn_func import MSDeformAttnFunction, ms_deform_attn_core_pytorch\n\n\nN, M, D = 1, 2, 2\nLq, L, P = 2, 2, 2\nshapes = torch.as_tensor([(6, 4), (3, 2)], dtype=torch.long).cuda()\nlevel_start_index = torch.cat((shapes.new_zeros((1, )), shapes.prod(1).cumsum(0)[:-1]))\nS = sum([(H*W).item() for H, W in shapes])\n\n\ntorch.manual_seed(3)\n\n\n@torch.no_grad()\ndef check_forward_equal_with_pytorch_double():\n    value = torch.rand(N, S, M, D).cuda() * 0.01\n    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)\n    im2col_step = 2\n    output_pytorch = ms_deform_attn_core_pytorch(value.double(), shapes, sampling_locations.double(), attention_weights.double()).detach().cpu()\n    output_cuda = MSDeformAttnFunction.apply(value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step).detach().cpu()\n    fwdok = torch.allclose(output_cuda, output_pytorch)\n    max_abs_err = (output_cuda - output_pytorch).abs().max()\n    max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()\n\n    print(f'* {fwdok} check_forward_equal_with_pytorch_double: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')\n\n\n@torch.no_grad()\ndef check_forward_equal_with_pytorch_float():\n    value = torch.rand(N, S, M, D).cuda() * 0.01\n    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)\n    im2col_step = 2\n    output_pytorch = ms_deform_attn_core_pytorch(value, shapes, sampling_locations, attention_weights).detach().cpu()\n    output_cuda = MSDeformAttnFunction.apply(value, shapes, level_start_index, sampling_locations, attention_weights, im2col_step).detach().cpu()\n    fwdok = torch.allclose(output_cuda, output_pytorch, rtol=1e-2, atol=1e-3)\n    max_abs_err = (output_cuda - output_pytorch).abs().max()\n    max_rel_err = ((output_cuda - output_pytorch).abs() / output_pytorch.abs()).max()\n\n    print(f'* {fwdok} check_forward_equal_with_pytorch_float: max_abs_err {max_abs_err:.2e} max_rel_err {max_rel_err:.2e}')\n\n\ndef check_gradient_numerical(channels=4, grad_value=True, grad_sampling_loc=True, grad_attn_weight=True):\n\n    value = torch.rand(N, S, M, channels).cuda() * 0.01\n    sampling_locations = torch.rand(N, Lq, M, L, P, 2).cuda()\n    attention_weights = torch.rand(N, Lq, M, L, P).cuda() + 1e-5\n    attention_weights /= attention_weights.sum(-1, keepdim=True).sum(-2, keepdim=True)\n    im2col_step = 2\n    func = 
MSDeformAttnFunction.apply\n\n    value.requires_grad = grad_value\n    sampling_locations.requires_grad = grad_sampling_loc\n    attention_weights.requires_grad = grad_attn_weight\n\n    gradok = gradcheck(func, (value.double(), shapes, level_start_index, sampling_locations.double(), attention_weights.double(), im2col_step))\n\n    print(f'* {gradok} check_gradient_numerical(D={channels})')\n\n\nif __name__ == '__main__':\n    check_forward_equal_with_pytorch_double()\n    check_forward_equal_with_pytorch_float()\n\n    for channels in [30, 32, 64, 71, 1025, 2048, 3096]:\n        check_gradient_numerical(channels, True, True, True)\n\n\n\n"
  },
  {
    "path": "requirements/albu.txt",
    "content": "albumentations>=0.3.2 --no-binary qudida,albumentations\n"
  },
  {
    "path": "requirements/build.txt",
    "content": "# These must be installed before building mmdetection\ncython\nnumpy\n"
  },
  {
    "path": "requirements/docs.txt",
    "content": "docutils==0.16.0\nmarkdown>=3.4.0\nmyst-parser\n-e git+https://github.com/open-mmlab/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme\nsphinx==5.3.0\nsphinx-copybutton\nsphinx_markdown_tables>=0.0.17\nsphinx_rtd_theme\n"
  },
  {
    "path": "requirements/mminstall.txt",
    "content": "mmcv-full>=1.3.17\n"
  },
  {
    "path": "requirements/optional.txt",
    "content": "cityscapesscripts\nimagecorruptions\nscikit-learn\n"
  },
  {
    "path": "requirements/readthedocs.txt",
    "content": "mmcv\nscipy\ntorch\ntorchvision\n"
  },
  {
    "path": "requirements/runtime.txt",
    "content": "matplotlib\nnumpy\npycocotools\nscipy\nsix\nterminaltables\n"
  },
  {
    "path": "requirements/tests.txt",
    "content": "asynctest\ncodecov\nflake8\ninterrogate\nisort==4.3.21\n# Note: used for kwarray.group_items, this may be ported to mmcv in the future.\nkwarray\n-e git+https://github.com/open-mmlab/mmtracking#egg=mmtrack\nonnx==1.7.0\nonnxruntime>=1.8.0\nprotobuf<=3.20.1\npytest\nubelt\nxdoctest>=0.10.0\nyapf\n"
  },
  {
    "path": "requirements.txt",
    "content": "-r requirements/build.txt\n-r requirements/optional.txt\n-r requirements/runtime.txt\n-r requirements/tests.txt\n"
  },
  {
    "path": "setup.cfg",
    "content": "[isort]\nline_length = 79\nmulti_line_output = 0\nextra_standard_library = setuptools\nknown_first_party = mmdet\nknown_third_party = PIL,asynctest,cityscapesscripts,cv2,gather_models,matplotlib,mmcv,numpy,onnx,onnxruntime,pycocotools,pytest,pytorch_sphinx_theme,requests,scipy,seaborn,six,terminaltables,torch,ts,yaml\nno_lines_before = STDLIB,LOCALFOLDER\ndefault_section = THIRDPARTY\n\n[yapf]\nBASED_ON_STYLE = pep8\nBLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true\nSPLIT_BEFORE_EXPRESSION_AFTER_OPENING_PAREN = true\n\n# ignore-words-list needs to be lowercase format. For example, if we want to\n# ignore word \"BA\", then we need to append \"ba\" to ignore-words-list rather\n# than \"BA\"\n[codespell]\nskip = *.ipynb\nquiet-level = 3\nignore-words-list = patten,nd,ty,mot,hist,formating,winn,gool,datas,wan,confids,TOOD,tood,ba,warmup,nam,dota,DOTA\n"
  },
  {
    "path": "setup.py",
    "content": "#!/usr/bin/env python\n# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport os.path as osp\nimport platform\nimport shutil\nimport sys\nimport warnings\nfrom setuptools import find_packages, setup\n\nimport torch\nfrom torch.utils.cpp_extension import (BuildExtension, CppExtension,\n                                       CUDAExtension)\n\n\ndef readme():\n    with open('README.md', encoding='utf-8') as f:\n        content = f.read()\n    return content\n\n\nversion_file = 'mmdet/version.py'\n\n\ndef get_version():\n    with open(version_file, 'r') as f:\n        exec(compile(f.read(), version_file, 'exec'))\n    return locals()['__version__']\n\n\ndef make_cuda_ext(name, module, sources, sources_cuda=[]):\n\n    define_macros = []\n    extra_compile_args = {'cxx': []}\n\n    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':\n        define_macros += [('WITH_CUDA', None)]\n        extension = CUDAExtension\n        extra_compile_args['nvcc'] = [\n            '-D__CUDA_NO_HALF_OPERATORS__',\n            '-D__CUDA_NO_HALF_CONVERSIONS__',\n            '-D__CUDA_NO_HALF2_OPERATORS__',\n        ]\n        sources += sources_cuda\n    else:\n        print(f'Compiling {name} without CUDA')\n        extension = CppExtension\n\n    return extension(\n        name=f'{module}.{name}',\n        sources=[os.path.join(*module.split('.'), p) for p in sources],\n        define_macros=define_macros,\n        extra_compile_args=extra_compile_args)\n\n\ndef parse_requirements(fname='requirements.txt', with_version=True):\n    \"\"\"Parse the package dependencies listed in a requirements file but strips\n    specific versioning information.\n\n    Args:\n        fname (str): path to requirements file\n        with_version (bool, default=False): if True include version specs\n\n    Returns:\n        List[str]: list of requirements items\n\n    CommandLine:\n        python -c \"import setup; print(setup.parse_requirements())\"\n    \"\"\"\n    import re\n    import sys\n    from os.path import exists\n    require_fpath = fname\n\n    def parse_line(line):\n        \"\"\"Parse information from a line in a requirements text file.\"\"\"\n        if line.startswith('-r '):\n            # Allow specifying requirements in other files\n            target = line.split(' ')[1]\n            for info in parse_require_file(target):\n                yield info\n        else:\n            info = {'line': line}\n            if line.startswith('-e '):\n                info['package'] = line.split('#egg=')[1]\n            elif '@git+' in line:\n                info['package'] = line\n            else:\n                # Remove versioning from the package\n                pat = '(' + '|'.join(['>=', '==', '>']) + ')'\n                parts = re.split(pat, line, maxsplit=1)\n                parts = [p.strip() for p in parts]\n\n                info['package'] = parts[0]\n                if len(parts) > 1:\n                    op, rest = parts[1:]\n                    if ';' in rest:\n                        # Handle platform specific dependencies\n                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies\n                        version, platform_deps = map(str.strip,\n                                                     rest.split(';'))\n                        info['platform_deps'] = platform_deps\n                    else:\n                        version = rest  # NOQA\n                    info['version'] = (op, 
version)\n            yield info\n\n    def parse_require_file(fpath):\n        with open(fpath, 'r') as f:\n            for line in f.readlines():\n                line = line.strip()\n                if line and not line.startswith('#'):\n                    for info in parse_line(line):\n                        yield info\n\n    def gen_packages_items():\n        if exists(require_fpath):\n            for info in parse_require_file(require_fpath):\n                parts = [info['package']]\n                if with_version and 'version' in info:\n                    parts.extend(info['version'])\n                if not sys.version.startswith('3.4'):\n                    # apparently package_deps are broken in 3.4\n                    platform_deps = info.get('platform_deps')\n                    if platform_deps is not None:\n                        parts.append(';' + platform_deps)\n                item = ''.join(parts)\n                yield item\n\n    packages = list(gen_packages_items())\n    return packages\n\n\ndef add_mim_extension():\n    \"\"\"Add extra files that are required to support MIM into the package.\n\n    These files will be added by creating a symlink to the originals if the\n    package is installed in `editable` mode (e.g. pip install -e .), or by\n    copying from the originals otherwise.\n    \"\"\"\n\n    # parse installment mode\n    if 'develop' in sys.argv:\n        # installed by `pip install -e .`\n        if platform.system() == 'Windows':\n            # set `copy` mode here since symlink fails on Windows.\n            mode = 'copy'\n        else:\n            mode = 'symlink'\n    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:\n        # installed by `pip install .`\n        # or create source distribution by `python setup.py sdist`\n        mode = 'copy'\n    else:\n        return\n\n    filenames = ['tools', 'configs', 'demo', 'model-index.yml']\n    repo_path = osp.dirname(__file__)\n    mim_path = osp.join(repo_path, 'mmdet', '.mim')\n    os.makedirs(mim_path, exist_ok=True)\n\n    for filename in filenames:\n        if osp.exists(filename):\n            src_path = osp.join(repo_path, filename)\n            tar_path = osp.join(mim_path, filename)\n\n            if osp.isfile(tar_path) or osp.islink(tar_path):\n                os.remove(tar_path)\n            elif osp.isdir(tar_path):\n                shutil.rmtree(tar_path)\n\n            if mode == 'symlink':\n                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))\n                os.symlink(src_relpath, tar_path)\n            elif mode == 'copy':\n                if osp.isfile(src_path):\n                    shutil.copyfile(src_path, tar_path)\n                elif osp.isdir(src_path):\n                    shutil.copytree(src_path, tar_path)\n                else:\n                    warnings.warn(f'Cannot copy file {src_path}.')\n            else:\n                raise ValueError(f'Invalid mode {mode}')\n\n\nif __name__ == '__main__':\n    add_mim_extension()\n    setup(\n        name='mmdet',\n        version=get_version(),\n        description='OpenMMLab Detection Toolbox and Benchmark',\n        long_description=readme(),\n        long_description_content_type='text/markdown',\n        author='MMDetection Contributors',\n        author_email='openmmlab@gmail.com',\n        keywords='computer vision, object detection',\n        url='https://github.com/open-mmlab/mmdetection',\n        packages=find_packages(exclude=('configs', 'tools', 'demo')),\n        
include_package_data=True,\n        classifiers=[\n            'Development Status :: 5 - Production/Stable',\n            'License :: OSI Approved :: Apache Software License',\n            'Operating System :: OS Independent',\n            'Programming Language :: Python :: 3',\n            'Programming Language :: Python :: 3.7',\n            'Programming Language :: Python :: 3.8',\n            'Programming Language :: Python :: 3.9',\n        ],\n        license='Apache License 2.0',\n        install_requires=parse_requirements('requirements/runtime.txt'),\n        extras_require={\n            'all': parse_requirements('requirements.txt'),\n            'tests': parse_requirements('requirements/tests.txt'),\n            'build': parse_requirements('requirements/build.txt'),\n            'optional': parse_requirements('requirements/optional.txt'),\n            'mim': parse_requirements('requirements/mminstall.txt'),\n        },\n        ext_modules=[],\n        cmdclass={'build_ext': BuildExtension},\n        zip_safe=False)\n"
  },
  {
    "path": "tools/convert_ckpt.py",
    "content": "import torch\nimport argparse\n\nparer = argparse.ArgumentParser()\nparer.add_argument('source_file')\nparer.add_argument('des_file')\nargs = parer.parse_args()\n\nckpt = torch.load(args.source_file, map_location='cpu')\nckpt = ckpt['model']\ntorch.save(ckpt, args.des_file)"
  },
  {
    "path": "tools/dist_test.sh",
    "content": "#!/usr/bin/env bash\n\nCONFIG=$1\nGPUS=$2\nNNODES=${NNODES:-1}\nNODE_RANK=${NODE_RANK:-0}\nPORT=${PORT:-29500}\nMASTER_ADDR=${MASTER_ADDR:-\"127.0.0.1\"}\n\nPYTHONPATH=\"$(dirname $0)/..\":$PYTHONPATH \\\npython -m torch.distributed.launch \\\n    --nnodes=$NNODES \\\n    --node_rank=$NODE_RANK \\\n    --master_addr=$MASTER_ADDR \\\n    --nproc_per_node=$GPUS \\\n    --master_port=$PORT \\\n    $(dirname \"$0\")/test.py \\\n    $CONFIG \\\n    --launcher pytorch \\\n    ${@:3}\n"
  },
  {
    "path": "tools/test.py",
    "content": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\nimport time\nimport warnings\n\nimport mmcv\nimport torch\nfrom mmcv import Config, DictAction\nfrom mmcv.cnn import fuse_conv_bn\nfrom mmcv.runner import (get_dist_info, init_dist, load_checkpoint,\n                         wrap_fp16_model)\n\nfrom mmdet.apis import multi_gpu_test, single_gpu_test\nfrom mmdet.datasets import (build_dataloader, build_dataset,\n                            replace_ImageToTensor)\nfrom mmdet.models import build_detector\nfrom mmdet.utils import (build_ddp, build_dp, compat_cfg, get_device,\n                         replace_cfg_vals, setup_multi_processes,\n                         update_data_root)\n\n\ndef parse_args():\n    parser = argparse.ArgumentParser(\n        description='MMDet test (and eval) a model')\n    parser.add_argument('config', help='test config file path')\n    parser.add_argument(\n        '--work-dir',\n        help='the directory to save the file containing evaluation metrics')\n    parser.add_argument('--out', help='output result file in pickle format')\n    parser.add_argument(\n        '--fuse-conv-bn',\n        action='store_true',\n        help='Whether to fuse conv and bn, this will slightly increase'\n        'the inference speed')\n    parser.add_argument(\n        '--gpu-ids',\n        type=int,\n        nargs='+',\n        help='(Deprecated, please use --gpu-id) ids of gpus to use '\n        '(only applicable to non-distributed training)')\n    parser.add_argument(\n        '--gpu-id',\n        type=int,\n        default=0,\n        help='id of gpu to use '\n        '(only applicable to non-distributed testing)')\n    parser.add_argument(\n        '--format-only',\n        action='store_true',\n        help='Format the output results without perform evaluation. It is'\n        'useful when you want to format the result to a specific format and '\n        'submit it to the test server')\n    parser.add_argument(\n        '--eval',\n        type=str,\n        nargs='+',\n        help='evaluation metrics, which depends on the dataset, e.g., \"bbox\",'\n        ' \"segm\", \"proposal\" for COCO, and \"mAP\", \"recall\" for PASCAL VOC')\n    parser.add_argument('--show', action='store_true', help='show results')\n    parser.add_argument(\n        '--show-dir', help='directory where painted images will be saved')\n    parser.add_argument(\n        '--show-score-thr',\n        type=float,\n        default=0.3,\n        help='score threshold (default: 0.3)')\n    parser.add_argument(\n        '--gpu-collect',\n        action='store_true',\n        help='whether to use gpu to collect results.')\n    parser.add_argument(\n        '--tmpdir',\n        help='tmp directory used for collecting results from multiple '\n        'workers, available when gpu-collect is not specified')\n    parser.add_argument(\n        '--cfg-options',\n        nargs='+',\n        action=DictAction,\n        help='override some settings in the used config, the key-value pair '\n        'in xxx=yyy format will be merged into config file. If the value to '\n        'be overwritten is a list, it should be like key=\"[a,b]\" or key=a,b '\n        'It also allows nested list/tuple values, e.g. 
key=\"[(a,b),(c,d)]\" '\n        'Note that the quotation marks are necessary and that no white space '\n        'is allowed.')\n    parser.add_argument(\n        '--options',\n        nargs='+',\n        action=DictAction,\n        help='custom options for evaluation, the key-value pair in xxx=yyy '\n        'format will be kwargs for dataset.evaluate() function (deprecate), '\n        'change to --eval-options instead.')\n    parser.add_argument(\n        '--eval-options',\n        nargs='+',\n        action=DictAction,\n        help='custom options for evaluation, the key-value pair in xxx=yyy '\n        'format will be kwargs for dataset.evaluate() function')\n    parser.add_argument(\n        '--launcher',\n        choices=['none', 'pytorch', 'slurm', 'mpi'],\n        default='none',\n        help='job launcher')\n    parser.add_argument('--local_rank', type=int, default=0)\n    args = parser.parse_args()\n    if 'LOCAL_RANK' not in os.environ:\n        os.environ['LOCAL_RANK'] = str(args.local_rank)\n\n    if args.options and args.eval_options:\n        raise ValueError(\n            '--options and --eval-options cannot be both '\n            'specified, --options is deprecated in favor of --eval-options')\n    if args.options:\n        warnings.warn('--options is deprecated in favor of --eval-options')\n        args.eval_options = args.options\n    return args\n\n\ndef main():\n    args = parse_args()\n\n    assert args.out or args.eval or args.format_only or args.show \\\n        or args.show_dir, \\\n        ('Please specify at least one operation (save/eval/format/show the '\n         'results / save the results) with the argument \"--out\", \"--eval\"'\n         ', \"--format-only\", \"--show\" or \"--show-dir\"')\n\n    if args.eval and args.format_only:\n        raise ValueError('--eval and --format_only cannot be both specified')\n\n    if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):\n        raise ValueError('The output file must be a pkl file.')\n\n    cfg = Config.fromfile(args.config)\n\n    # replace the ${key} with the value of cfg.key\n    cfg = replace_cfg_vals(cfg)\n\n    # update data root according to MMDET_DATASETS\n    update_data_root(cfg)\n\n    if args.cfg_options is not None:\n        cfg.merge_from_dict(args.cfg_options)\n\n    cfg = compat_cfg(cfg)\n\n    # set multi-process settings\n    setup_multi_processes(cfg)\n\n    # import modules from plguin/xx, registry will be updated\n    if hasattr(cfg, 'plugin'):\n        if cfg.plugin:\n            import importlib\n            if hasattr(cfg, 'plugin_dir'):\n                plugin_dir = cfg.plugin_dir\n                _module_dir = os.path.dirname(plugin_dir)\n                _module_dir = _module_dir.split('/')\n                _module_path = _module_dir[0]\n\n                for m in _module_dir[1:]:\n                    _module_path = _module_path + '.' + m\n                print(_module_path)\n                plg_lib = importlib.import_module(_module_path)\n            else:\n                # import dir is the dirpath for the config file\n                _module_dir = os.path.dirname(args.config)\n                _module_dir = _module_dir.split('/')\n                _module_path = _module_dir[0]\n                for m in _module_dir[1:]:\n                    _module_path = _module_path + '.' 
+ m\n                # print(_module_path)\n                plg_lib = importlib.import_module(_module_path)\n\n\n    # set cudnn_benchmark\n    if cfg.get('cudnn_benchmark', False):\n        torch.backends.cudnn.benchmark = True\n\n    if 'pretrained' in cfg.model:\n        cfg.model.pretrained = None\n    elif (cfg.model.get('backbone', None) is not None\n          and 'init_cfg' in cfg.model.backbone):\n        cfg.model.backbone.init_cfg = None\n\n    if cfg.model.get('neck'):\n        if isinstance(cfg.model.neck, list):\n            for neck_cfg in cfg.model.neck:\n                if neck_cfg.get('rfp_backbone'):\n                    if neck_cfg.rfp_backbone.get('pretrained'):\n                        neck_cfg.rfp_backbone.pretrained = None\n        elif cfg.model.neck.get('rfp_backbone'):\n            if cfg.model.neck.rfp_backbone.get('pretrained'):\n                cfg.model.neck.rfp_backbone.pretrained = None\n\n    if args.gpu_ids is not None:\n        cfg.gpu_ids = args.gpu_ids[0:1]\n        warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '\n                      'Because we only support single GPU mode in '\n                      'non-distributed testing. Use the first GPU '\n                      'in `gpu_ids` now.')\n    else:\n        cfg.gpu_ids = [args.gpu_id]\n    cfg.device = get_device()\n    # init distributed env first, since logger depends on the dist info.\n    if args.launcher == 'none':\n        distributed = False\n    else:\n        distributed = True\n        init_dist(args.launcher, **cfg.dist_params)\n\n    test_dataloader_default_args = dict(\n        samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False)\n\n    # in case the test dataset is concatenated\n    if isinstance(cfg.data.test, dict):\n        cfg.data.test.test_mode = True\n        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:\n            # Replace 'ImageToTensor' to 'DefaultFormatBundle'\n            cfg.data.test.pipeline = replace_ImageToTensor(\n                cfg.data.test.pipeline)\n    elif isinstance(cfg.data.test, list):\n        for ds_cfg in cfg.data.test:\n            ds_cfg.test_mode = True\n        if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:\n            for ds_cfg in cfg.data.test:\n                ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)\n\n    test_loader_cfg = {\n        **test_dataloader_default_args,\n        **cfg.data.get('test_dataloader', {})\n    }\n\n    rank, _ = get_dist_info()\n    # allows not to create\n    if args.work_dir is not None and rank == 0:\n        mmcv.mkdir_or_exist(osp.abspath(args.work_dir))\n        timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())\n        json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')\n\n    # build the dataloader\n    dataset = build_dataset(cfg.data.test)\n    data_loader = build_dataloader(dataset, **test_loader_cfg)\n\n    # build the model and load checkpoint\n    cfg.model.train_cfg = None\n    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))\n    fp16_cfg = cfg.get('fp16', None)\n    if fp16_cfg is not None:\n        wrap_fp16_model(model)\n    # checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')\n    checkpoint = {}\n    if args.fuse_conv_bn:\n        model = fuse_conv_bn(model)\n    # old versions did not save class info in checkpoints, this walkaround is\n    # for backward compatibility\n    if 'CLASSES' in checkpoint.get('meta', {}):\n        model.CLASSES = 
checkpoint['meta']['CLASSES']\n    else:\n        model.CLASSES = dataset.CLASSES\n\n    if not distributed:\n        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)\n        outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,\n                                  args.show_score_thr)\n    else:\n        model = build_ddp(\n            model,\n            cfg.device,\n            device_ids=[int(os.environ['LOCAL_RANK'])],\n            broadcast_buffers=False)\n\n        # In multi_gpu_test, if tmpdir is None, some tesnors\n        # will init on cuda by default, and no device choice supported.\n        # Init a tmpdir to avoid error on npu here.\n        if cfg.device == 'npu' and args.tmpdir is None:\n            args.tmpdir = './npu_tmpdir'\n\n        outputs = multi_gpu_test(\n            model, data_loader, args.tmpdir, args.gpu_collect\n            or cfg.evaluation.get('gpu_collect', False))\n\n    rank, _ = get_dist_info()\n    if rank == 0:\n        if args.out:\n            print(f'\\nwriting results to {args.out}')\n            mmcv.dump(outputs, args.out)\n        kwargs = {} if args.eval_options is None else args.eval_options\n        if args.format_only:\n            dataset.format_results(outputs, **kwargs)\n        if args.eval:\n            eval_kwargs = cfg.get('evaluation', {}).copy()\n            # hard-code way to remove EvalHook args\n            for key in [\n                    'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',\n                    'rule', 'dynamic_intervals'\n            ]:\n                eval_kwargs.pop(key, None)\n            eval_kwargs.update(dict(metric=args.eval, **kwargs))\n            metric = dataset.evaluate(outputs, **eval_kwargs)\n            print(metric)\n            metric_dict = dict(config=args.config, metric=metric)\n            if args.work_dir is not None and rank == 0:\n                mmcv.dump(metric_dict, json_file)\n\n\nif __name__ == '__main__':\n    main()\n"
  }
]