Repository: RainBowLuoCS/DiffusionTrack
Branch: main
Commit: 9714f01c21ff
Files: 99
Total size: 550.2 KB

Directory structure:
DiffusionTrack/

├── .gitattributes
├── .gitignore
├── LICENSE
├── README.md
├── diffusion/
│   └── models/
│       ├── diffusion_head.py
│       ├── diffusion_losses.py
│       ├── diffusion_models.py
│       └── diffusionnet.py
├── exps/
│   ├── default/
│   │   ├── nano.py
│   │   ├── yolov3.py
│   │   ├── yolox_l.py
│   │   ├── yolox_m.py
│   │   ├── yolox_s.py
│   │   ├── yolox_tiny.py
│   │   └── yolox_x.py
│   └── example/
│       └── mot/
│           ├── yolox_x_diffusion_det_dancetrack.py
│           ├── yolox_x_diffusion_det_mot17.py
│           ├── yolox_x_diffusion_det_mot17_ablation.py
│           ├── yolox_x_diffusion_det_mot20.py
│           ├── yolox_x_diffusion_track_dancetrack.py
│           ├── yolox_x_diffusion_track_dancetrack_baseline.py
│           ├── yolox_x_diffusion_track_mot17.py
│           ├── yolox_x_diffusion_track_mot17_ablation.py
│           ├── yolox_x_diffusion_track_mot17_baseline.py
│           ├── yolox_x_diffusion_track_mot20.py
│           └── yolox_x_diffusion_track_mot20_baseline.py
├── requirements.txt
├── setup.py
├── tools/
│   ├── convert_bdd100k_to_coco.py
│   ├── convert_cityperson_to_coco.py
│   ├── convert_crowdhuman_to_coco.py
│   ├── convert_dancetrack_to_coco.py
│   ├── convert_ethz_to_coco.py
│   ├── convert_kitti_to_coco.py
│   ├── convert_mot17_to_coco.py
│   ├── convert_mot20_to_coco.py
│   ├── convert_video.py
│   ├── mix_data_ablation.py
│   ├── mix_data_bdd100k.py
│   ├── mix_data_test_mot17.py
│   ├── mix_data_test_mot20.py
│   ├── mota.py
│   ├── track.py
│   ├── train.py
│   └── txt2video.py
└── yolox/
    ├── __init__.py
    ├── core/
    │   ├── __init__.py
    │   ├── launch.py
    │   └── trainer.py
    ├── data/
    │   ├── __init__.py
    │   ├── data_augment.py
    │   ├── data_prefetcher.py
    │   ├── dataloading.py
    │   └── samplers.py
    ├── evaluators/
    │   ├── __init__.py
    │   ├── coco_evaluator.py
    │   ├── diffusion_mot_evaluator.py
    │   ├── diffusion_mot_evaluator_kl.py
    │   └── evaluation.py
    ├── exp/
    │   ├── __init__.py
    │   ├── base_exp.py
    │   ├── build.py
    │   └── yolox_base.py
    ├── layers/
    │   ├── __init__.py
    │   ├── csrc/
    │   │   ├── cocoeval/
    │   │   │   ├── cocoeval.cpp
    │   │   │   └── cocoeval.h
    │   │   └── vision.cpp
    │   └── fast_coco_eval_api.py
    ├── models/
    │   ├── __init__.py
    │   ├── darknet.py
    │   ├── losses.py
    │   ├── network_blocks.py
    │   ├── yolo_fpn.py
    │   ├── yolo_head.py
    │   ├── yolo_pafpn.py
    │   └── yolox.py
    ├── tracker/
    │   ├── basetrack.py
    │   ├── diffusion_tracker.py
    │   ├── diffusion_tracker_kl.py
    │   ├── kalman_filter.py
    │   └── matching.py
    ├── tracking_utils/
    │   ├── evaluation.py
    │   ├── io.py
    │   └── timer.py
    └── utils/
        ├── __init__.py
        ├── allreduce_norm.py
        ├── box_ops.py
        ├── boxes.py
        ├── checkpoint.py
        ├── cluster_nms.py
        ├── demo_utils.py
        ├── dist.py
        ├── ema.py
        ├── logger.py
        ├── lr_scheduler.py
        ├── metric.py
        ├── model_utils.py
        ├── setup_env.py
        └── visualize.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitattributes
================================================
README.assets/MOT20.gif filter=lfs diff=lfs merge=lfs -text
README.assets/dancetrack.gif filter=lfs diff=lfs merge=lfs -text


================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so
datasets/*
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# output
docs/api
.code-workspace.code-workspace
*.pkl
*.npy
*.pth
*.onnx
*.engine
events.out.tfevents*
pretrained
*_outputs/
DiffusionTrack_*/
datasets/
*.pth.tar
*.tar.gz
src/*
test.py
id_rsa_cs
module_test.py
vis_fold

================================================
FILE: LICENSE
================================================

Attribution-NonCommercial 4.0 International

=======================================================================

Creative Commons Corporation ("Creative Commons") is not a law firm and
does not provide legal services or legal advice. Distribution of
Creative Commons public licenses does not create a lawyer-client or
other relationship. Creative Commons makes its licenses and related
information available on an "as-is" basis. Creative Commons gives no
warranties regarding its licenses, any material licensed under their
terms and conditions, or any related information. Creative Commons
disclaims all liability for damages resulting from their use to the
fullest extent possible.

Using Creative Commons Public Licenses

Creative Commons public licenses provide a standard set of terms and
conditions that creators and other rights holders may use to share
original works of authorship and other material subject to copyright
and certain other rights specified in the public license below. The
following considerations are for informational purposes only, are not
exhaustive, and do not form part of our licenses.

     Considerations for licensors: Our public licenses are
     intended for use by those authorized to give the public
     permission to use material in ways otherwise restricted by
     copyright and certain other rights. Our licenses are
     irrevocable. Licensors should read and understand the terms
     and conditions of the license they choose before applying it.
     Licensors should also secure all rights necessary before
     applying our licenses so that the public can reuse the
     material as expected. Licensors should clearly mark any
     material not subject to the license. This includes other CC-
     licensed material, or material used under an exception or
     limitation to copyright. More considerations for licensors:
   wiki.creativecommons.org/Considerations_for_licensors

     Considerations for the public: By using one of our public
     licenses, a licensor grants the public permission to use the
     licensed material under specified terms and conditions. If
     the licensor's permission is not necessary for any reason--for
     example, because of any applicable exception or limitation to
     copyright--then that use is not regulated by the license. Our
     licenses grant only permissions under copyright and certain
     other rights that a licensor has authority to grant. Use of
     the licensed material may still be restricted for other
     reasons, including because others have copyright or other
     rights in the material. A licensor may make special requests,
     such as asking that all changes be marked or described.
     Although not required by our licenses, you are encouraged to
     respect those requests where reasonable. More considerations
     for the public:
   wiki.creativecommons.org/Considerations_for_licensees

=======================================================================

Creative Commons Attribution-NonCommercial 4.0 International Public
License

By exercising the Licensed Rights (defined below), You accept and agree
to be bound by the terms and conditions of this Creative Commons
Attribution-NonCommercial 4.0 International Public License ("Public
License"). To the extent this Public License may be interpreted as a
contract, You are granted the Licensed Rights in consideration of Your
acceptance of these terms and conditions, and the Licensor grants You
such rights in consideration of benefits the Licensor receives from
making the Licensed Material available under these terms and
conditions.

Section 1 -- Definitions.

  a. Adapted Material means material subject to Copyright and Similar
     Rights that is derived from or based upon the Licensed Material
     and in which the Licensed Material is translated, altered,
     arranged, transformed, or otherwise modified in a manner requiring
     permission under the Copyright and Similar Rights held by the
     Licensor. For purposes of this Public License, where the Licensed
     Material is a musical work, performance, or sound recording,
     Adapted Material is always produced where the Licensed Material is
     synched in timed relation with a moving image.

  b. Adapter's License means the license You apply to Your Copyright
     and Similar Rights in Your contributions to Adapted Material in
     accordance with the terms and conditions of this Public License.

  c. Copyright and Similar Rights means copyright and/or similar rights
     closely related to copyright including, without limitation,
     performance, broadcast, sound recording, and Sui Generis Database
     Rights, without regard to how the rights are labeled or
     categorized. For purposes of this Public License, the rights
     specified in Section 2(b)(1)-(2) are not Copyright and Similar
     Rights.

  d. Effective Technological Measures means those measures that, in the
     absence of proper authority, may not be circumvented under laws
     fulfilling obligations under Article 11 of the WIPO Copyright
     Treaty adopted on December 20, 1996, and/or similar international
     agreements.

  e. Exceptions and Limitations means fair use, fair dealing, and/or
     any other exception or limitation to Copyright and Similar Rights
     that applies to Your use of the Licensed Material.

  f. Licensed Material means the artistic or literary work, database,
     or other material to which the Licensor applied this Public
     License.

  g. Licensed Rights means the rights granted to You subject to the
     terms and conditions of this Public License, which are limited to
     all Copyright and Similar Rights that apply to Your use of the
     Licensed Material and that the Licensor has authority to license.

  h. Licensor means the individual(s) or entity(ies) granting rights
     under this Public License.

  i. NonCommercial means not primarily intended for or directed towards
     commercial advantage or monetary compensation. For purposes of
     this Public License, the exchange of the Licensed Material for
     other material subject to Copyright and Similar Rights by digital
     file-sharing or similar means is NonCommercial provided there is
     no payment of monetary compensation in connection with the
     exchange.

  j. Share means to provide material to the public by any means or
     process that requires permission under the Licensed Rights, such
     as reproduction, public display, public performance, distribution,
     dissemination, communication, or importation, and to make material
     available to the public including in ways that members of the
     public may access the material from a place and at a time
     individually chosen by them.

  k. Sui Generis Database Rights means rights other than copyright
     resulting from Directive 96/9/EC of the European Parliament and of
     the Council of 11 March 1996 on the legal protection of databases,
     as amended and/or succeeded, as well as other essentially
     equivalent rights anywhere in the world.

  l. You means the individual or entity exercising the Licensed Rights
     under this Public License. Your has a corresponding meaning.

Section 2 -- Scope.

  a. License grant.

       1. Subject to the terms and conditions of this Public License,
          the Licensor hereby grants You a worldwide, royalty-free,
          non-sublicensable, non-exclusive, irrevocable license to
          exercise the Licensed Rights in the Licensed Material to:

            a. reproduce and Share the Licensed Material, in whole or
               in part, for NonCommercial purposes only; and

            b. produce, reproduce, and Share Adapted Material for
               NonCommercial purposes only.

       2. Exceptions and Limitations. For the avoidance of doubt, where
          Exceptions and Limitations apply to Your use, this Public
          License does not apply, and You do not need to comply with
          its terms and conditions.

       3. Term. The term of this Public License is specified in Section
          6(a).

       4. Media and formats; technical modifications allowed. The
          Licensor authorizes You to exercise the Licensed Rights in
          all media and formats whether now known or hereafter created,
          and to make technical modifications necessary to do so. The
          Licensor waives and/or agrees not to assert any right or
          authority to forbid You from making technical modifications
          necessary to exercise the Licensed Rights, including
          technical modifications necessary to circumvent Effective
          Technological Measures. For purposes of this Public License,
          simply making modifications authorized by this Section 2(a)
          (4) never produces Adapted Material.

       5. Downstream recipients.

            a. Offer from the Licensor -- Licensed Material. Every
               recipient of the Licensed Material automatically
               receives an offer from the Licensor to exercise the
               Licensed Rights under the terms and conditions of this
               Public License.

            b. No downstream restrictions. You may not offer or impose
               any additional or different terms or conditions on, or
               apply any Effective Technological Measures to, the
               Licensed Material if doing so restricts exercise of the
               Licensed Rights by any recipient of the Licensed
               Material.

       6. No endorsement. Nothing in this Public License constitutes or
          may be construed as permission to assert or imply that You
          are, or that Your use of the Licensed Material is, connected
          with, or sponsored, endorsed, or granted official status by,
          the Licensor or others designated to receive attribution as
          provided in Section 3(a)(1)(A)(i).

  b. Other rights.

       1. Moral rights, such as the right of integrity, are not
          licensed under this Public License, nor are publicity,
          privacy, and/or other similar personality rights; however, to
          the extent possible, the Licensor waives and/or agrees not to
          assert any such rights held by the Licensor to the limited
          extent necessary to allow You to exercise the Licensed
          Rights, but not otherwise.

       2. Patent and trademark rights are not licensed under this
          Public License.

       3. To the extent possible, the Licensor waives any right to
          collect royalties from You for the exercise of the Licensed
          Rights, whether directly or through a collecting society
          under any voluntary or waivable statutory or compulsory
          licensing scheme. In all other cases the Licensor expressly
          reserves any right to collect such royalties, including when
          the Licensed Material is used other than for NonCommercial
          purposes.

Section 3 -- License Conditions.

Your exercise of the Licensed Rights is expressly made subject to the
following conditions.

  a. Attribution.

       1. If You Share the Licensed Material (including in modified
          form), You must:

            a. retain the following if it is supplied by the Licensor
               with the Licensed Material:

                 i. identification of the creator(s) of the Licensed
                    Material and any others designated to receive
                    attribution, in any reasonable manner requested by
                    the Licensor (including by pseudonym if
                    designated);

                ii. a copyright notice;

               iii. a notice that refers to this Public License;

                iv. a notice that refers to the disclaimer of
                    warranties;

                 v. a URI or hyperlink to the Licensed Material to the
                    extent reasonably practicable;

            b. indicate if You modified the Licensed Material and
               retain an indication of any previous modifications; and

            c. indicate the Licensed Material is licensed under this
               Public License, and include the text of, or the URI or
               hyperlink to, this Public License.

       2. You may satisfy the conditions in Section 3(a)(1) in any
          reasonable manner based on the medium, means, and context in
          which You Share the Licensed Material. For example, it may be
          reasonable to satisfy the conditions by providing a URI or
          hyperlink to a resource that includes the required
          information.

       3. If requested by the Licensor, You must remove any of the
          information required by Section 3(a)(1)(A) to the extent
          reasonably practicable.

       4. If You Share Adapted Material You produce, the Adapter's
          License You apply must not prevent recipients of the Adapted
          Material from complying with this Public License.

Section 4 -- Sui Generis Database Rights.

Where the Licensed Rights include Sui Generis Database Rights that
apply to Your use of the Licensed Material:

  a. for the avoidance of doubt, Section 2(a)(1) grants You the right
     to extract, reuse, reproduce, and Share all or a substantial
     portion of the contents of the database for NonCommercial purposes
     only;

  b. if You include all or a substantial portion of the database
     contents in a database in which You have Sui Generis Database
     Rights, then the database in which You have Sui Generis Database
     Rights (but not its individual contents) is Adapted Material; and

  c. You must comply with the conditions in Section 3(a) if You Share
     all or a substantial portion of the contents of the database.

For the avoidance of doubt, this Section 4 supplements and does not
replace Your obligations under this Public License where the Licensed
Rights include other Copyright and Similar Rights.

Section 5 -- Disclaimer of Warranties and Limitation of Liability.

  a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
     EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
     AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
     ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
     IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
     WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
     PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
     ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
     KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
     ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.

  b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
     TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
     NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
     INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
     COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
     USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
     ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
     DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
     IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.

  c. The disclaimer of warranties and limitation of liability provided
     above shall be interpreted in a manner that, to the extent
     possible, most closely approximates an absolute disclaimer and
     waiver of all liability.

Section 6 -- Term and Termination.

  a. This Public License applies for the term of the Copyright and
     Similar Rights licensed here. However, if You fail to comply with
     this Public License, then Your rights under this Public License
     terminate automatically.

  b. Where Your right to use the Licensed Material has terminated under
     Section 6(a), it reinstates:

       1. automatically as of the date the violation is cured, provided
          it is cured within 30 days of Your discovery of the
          violation; or

       2. upon express reinstatement by the Licensor.

     For the avoidance of doubt, this Section 6(b) does not affect any
     right the Licensor may have to seek remedies for Your violations
     of this Public License.

  c. For the avoidance of doubt, the Licensor may also offer the
     Licensed Material under separate terms or conditions or stop
     distributing the Licensed Material at any time; however, doing so
     will not terminate this Public License.

  d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
     License.

Section 7 -- Other Terms and Conditions.

  a. The Licensor shall not be bound by any additional or different
     terms or conditions communicated by You unless expressly agreed.

  b. Any arrangements, understandings, or agreements regarding the
     Licensed Material not stated herein are separate from and
     independent of the terms and conditions of this Public License.

Section 8 -- Interpretation.

  a. For the avoidance of doubt, this Public License does not, and
     shall not be interpreted to, reduce, limit, restrict, or impose
     conditions on any use of the Licensed Material that could lawfully
     be made without permission under this Public License.

  b. To the extent possible, if any provision of this Public License is
     deemed unenforceable, it shall be automatically reformed to the
     minimum extent necessary to make it enforceable. If the provision
     cannot be reformed, it shall be severed from this Public License
     without affecting the enforceability of the remaining terms and
     conditions.

  c. No term or condition of this Public License will be waived and no
     failure to comply consented to unless expressly agreed to by the
     Licensor.

  d. Nothing in this Public License constitutes or may be interpreted
     as a limitation upon, or waiver of, any privileges and immunities
     that apply to the Licensor or You, including from the legal
     processes of any jurisdiction or authority.

=======================================================================

Creative Commons is not a party to its public
licenses. Notwithstanding, Creative Commons may elect to apply one of
its public licenses to material it publishes and in those instances
will be considered the “Licensor.” The text of the Creative Commons
public licenses is dedicated to the public domain under the CC0 Public
Domain Dedication. Except for the limited purpose of indicating that
material is shared under a Creative Commons public license or as
otherwise permitted by the Creative Commons policies published at
creativecommons.org/policies, Creative Commons does not authorize the
use of the trademark "Creative Commons" or any other trademark or logo
of Creative Commons without its prior written consent including,
without limitation, in connection with any unauthorized modifications
to any of its public licenses or any other arrangements,
understandings, or agreements concerning use of licensed material. For
the avoidance of doubt, this paragraph does not form part of the
public licenses.

Creative Commons may be contacted at creativecommons.org.


================================================
FILE: README.md
================================================
## DiffusionTrack: Diffusion Model For Multi-Object Tracking

**DiffusionTrack is the first work to apply diffusion models to multi-object tracking.**

![image-20230819130751450](README.assets/image-20230819130751450.png)

[**DiffusionTrack: Diffusion Model For Multi-Object Tracking**](https://arxiv.org/abs/2308.09905)

Run Luo, Zikai Song, Lintao Ma, Jinlin Wei, Wei Yang, Min Yang

*[arXiv 2308.09905](https://arxiv.org/abs/2308.09905)*

## Tracking performance

### Results on the MOT17 challenge test set at 15.89 FPS

| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |
| ------------------ | -------- | -------- | -------- | -------- | -------- |
| TrackFormer        | 74.1     | 68.0     | 57.3     | 54.1     | 60.9     |
| MeMOT              | 72.5     | 69.0     | 56.9     | 55.2     | /        |
| MOTR               | 71.9     | 68.4     | 57.2     | 55.8     | /        |
| CenterTrack        | 67.8     | 64.7     | 52.2     | 51.0     | 53.8     |
| PermaTrack         | 73.8     | 68.9     | 55.5     | 53.1     | 58.5     |
| TransCenter        | 73.2     | 62.2     | 54.5     | 49.7     | 60.1     |
| GTR                | 75.3     | 71.5     | 59.1     | 57.0     | 61.6     |
| TubeTK             | 63.0     | 58.6     | /        | /        | /        |
| **DiffusionTrack** | **77.9** | **73.8** | **60.8** | **58.8** | **63.2** |

### Results on the MOT20 challenge test set at 13.37 FPS

| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |
| ------------------ | -------- | -------- | -------- | -------- | -------- |
| TrackFormer        | 68.6     | 65.7     | 54.7     | 53.0     | 56.7     |
| MeMOT              | 63.7     | 66.1     | 54.1     | **55.0** | /        |
| TransCenter        | 67.7     | 58.7     | /        | /        | /        |
| **DiffusionTrack** | **72.8** | **66.3** | **55.3** | 51.3     | **59.9** |

### Results on the DanceTrack challenge test set at 21.05 FPS

| Method             | MOTA     | IDF1     | HOTA     | AssA     | DetA     |
| ------------------ | -------- | -------- | -------- | -------- | -------- |
| TransTrack         | 88.4     | 45.2     | 45.5     | 27.5     | 75.9     |
| CenterTrack        | 86.8     | 35.7     | 41.8     | 22.6     | 78.1     |
| **DiffusionTrack** | **89.3** | **47.5** | **52.4** | **33.5** | **82.2** |

### Visualization results

![MOT20](README.assets/MOT20.gif)

![dancetrack](README.assets/dancetrack.gif)

### Robustness to detection perturbation

![image-20230819134931428](README.assets/image-20230819134931428.png)

## Installation

Step 1. Install requirements for DiffusionTrack.

```
git clone https://github.com/RainBowLuoCS/DiffusionTrack.git
cd DiffusionTrack
pip3 install -r requirements.txt
python3 setup.py develop
```

Step 2. Install [pycocotools](https://github.com/cocodataset/cocoapi).

```
pip3 install cython; pip3 install 'git+https://github.com/cocodataset/cocoapi.git#subdirectory=PythonAPI'
```

Step 3. Install other dependencies.

```
pip3 install cython_bbox
```

Step 4. Install detectron2.

```
git clone https://github.com/facebookresearch/detectron2.git
python -m pip install -e detectron2
```

## Data preparation

Download [MOT17](https://motchallenge.net/), [MOT20](https://motchallenge.net/), [CrowdHuman](https://www.crowdhuman.org/), [Cityperson](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md), [ETHZ](https://github.com/Zhongdao/Towards-Realtime-MOT/blob/master/DATASET_ZOO.md) and [DanceTrack](https://github.com/DanceTrack/DanceTrack), then put them under <DiffusionTrack_HOME>/datasets in the following structure:

```
datasets
   |——————mot
   |        └——————train
   |        └——————test
   └——————crowdhuman
   |         └——————Crowdhuman_train
   |         └——————Crowdhuman_val
   |         └——————annotation_train.odgt
   |         └——————annotation_val.odgt
   └——————MOT20
   |        └——————train
   |        └——————test
   └——————dancetrack
   |        └——————train
   |        └——————test
   └——————Cityscapes
   |        └——————images
   |        └——————labels_with_ids
   └——————ETHZ
            └——————eth01
            └——————...
            └——————eth07
```

Then, you need to convert the datasets to COCO format and mix the different training data:

```
cd <DiffusionTrack_HOME>
python3 tools/convert_mot17_to_coco.py
python3 tools/convert_dancetrack_to_coco.py
python3 tools/convert_mot20_to_coco.py
python3 tools/convert_crowdhuman_to_coco.py
python3 tools/convert_cityperson_to_coco.py
python3 tools/convert_ethz_to_coco.py
```

Before mixing different datasets, you need to follow the operations in [mix_xxx.py](https://github.com/ifzhang/ByteTrack/blob/c116dfc746f9ebe07d419caa8acba9b3acfa79a6/tools/mix_data_ablation.py#L6) to create the data folders and symlinks (see the sketch after the commands below). Finally, you can mix the training data:

```
cd <DiffusionTrack_HOME>
python3 tools/mix_data_ablation.py
python3 tools/mix_data_test_mot17.py
python3 tools/mix_data_test_mot20.py
```
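
For reference, the linking step might look like the following Python sketch. This is illustrative only: the exact folder and annotation names are documented in the header comments of each `tools/mix_data_*.py` script, and the names used here (`mix_mot_ch`, `mot_train`, `crowdhuman_train`) are assumptions in that style.

```
# Hypothetical sketch of the folder/link layout expected by mix_data_ablation.py;
# folder names are assumptions, check the script's header comments.
import os

os.makedirs("datasets/mix_mot_ch/annotations", exist_ok=True)
# Link the image roots into the mixed-dataset folder.
os.symlink(os.path.abspath("datasets/mot/train"),
           "datasets/mix_mot_ch/mot_train")
os.symlink(os.path.abspath("datasets/crowdhuman/Crowdhuman_train"),
           "datasets/mix_mot_ch/crowdhuman_train")
```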

## Model zoo

You can download our model weights from [our model zoo](https://drive.google.com/drive/folders/1xfBo04Ncm504xFUMtC4_0g0Bf61yPsXh?usp=sharing). We provide 32-bit precision models; you can load one and fine-tune it in half precision to obtain a 16-bit model, which achieves the inference speeds reported above.
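
As a minimal sketch (not the authors' exact recipe), loading a released 32-bit checkpoint and casting it to half precision for inference might look like this. `get_exp` is the YOLOX-style helper expected in `yolox/exp/build.py`, and the checkpoint path and `"model"` key are assumptions based on YOLOX/ByteTrack conventions; the half-precision fine-tuning step itself is not shown.

```
import torch
from yolox.exp import get_exp  # YOLOX-style helper, assumed exported by yolox/exp

# Build the model from the matching experiment file.
exp = get_exp("exps/example/mot/yolox_x_diffusion_track_mot17.py", None)
model = exp.get_model()

# YOLOX-style checkpoints usually store weights under the "model" key.
ckpt = torch.load("pretrained/diffusiontrack_mot17_track.pth.tar", map_location="cpu")
model.load_state_dict(ckpt.get("model", ckpt))

model = model.eval().half().cuda()  # FP16 for the reported inference speeds
```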

## Training

The pretrained YOLOX models can be downloaded from the ByteTrack [model zoo](https://github.com/ifzhang/ByteTrack). After downloading, put them under <DiffusionTrack_HOME>/pretrained. Each model is trained in two stages: first the detection model (`yolox_x_diffusion_det_*`), then the tracking model initialized from the resulting detection checkpoint (`yolox_x_diffusion_track_*`).

- **Train ablation model (MOT17 half train and CrowdHuman)**

```
cd <DiffusionTrack_HOME>
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py -d 8 -b 16 -o -c pretrained/bytetrack_ablation.pth.tar
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -d 8 -b 16 -o -c pretrained/diffusiontrack_ablation_det.pth.tar
```

- **Train MOT17 test model (MOT17 train, CrowdHuman, Cityperson and ETHZ)**

```
cd <DiffusionTrack_HOME>
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot17.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot17_det.pth.tar
```

- **Train MOT20 test model (MOT20 train, CrowdHuman)**

```
cd <DiffusionTrack_HOME>
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_mot20.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot20.pth.tar
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -d 8 -b 16 -o -c pretrained/diffusiontrack_mot20_det.pth.tar
```

- **Train DanceTrack test model (DanceTrack)**

```
cd <DiffusionTrack_HOME>
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_det_dancetrack.py -d 8 -b 16 -o -c pretrained/bytetrack_x_mot17.pth.tar
python3 tools/train.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -d 8 -b 16 -o -c pretrained/diffusiontrack_dancetrack_det.pth.tar
```

## Tracking

- **Evaluation on MOT17 half val**

```
cd <DiffusionTrack_HOME>
python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py -c pretrained/diffusiontrack_ablation_track.pth.tar -b 1 -d 1 --fuse
```

- **Test on MOT17**

```
cd <DiffusionTrack_HOME>
python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot17.py -c pretrained/diffusiontrack_mot17_track.pth.tar -b 1 -d 1 --fuse
```

- **Test on MOT20**

```
cd <DiffusionTrack_HOME>
python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_mot20.py -c pretrained/diffusiontrack_mot20_track.pth.tar -b 1 -d 1 --fuse
```

- **Test on DanceTrack**

```
cd <DiffusionTrack_HOME>
python3 tools/track.py -f exps/example/mot/yolox_x_diffusion_track_dancetrack.py -c pretrained/diffusiontrack_dancetrack_track.pth.tar -b 1 -d 1 --fuse
```

## News
- (2024.02) [DiffMOT](https://github.com/Kroery/DiffMOT.git) is accepted by CVPR 2024, demonstrating the potential of diffusion-based trackers and once again validating our insight. Congratulations!
- (2023.12) Our paper is accepted by AAAI 2024!
- (2023.08) Code is released!
- (2023.06) Despite being rejected by NeurIPS 2023, we firmly believe that diffusion models are a novel solution to multi-object tracking.
- (2022.11) Wrote the first line of code for this great idea!

## License

This project is under the CC BY-NC 4.0 license. See [LICENSE](LICENSE) for details.

## Citation

If you use DiffusionTrack in your research or wish to refer to the baseline results published here, please use the following BibTeX entry.

```
@article{luo2023diffusiontrack,
  title={DiffusionTrack: Diffusion Model For Multi-Object Tracking},
  author={Luo, Run and Song, Zikai and Ma, Lintao and Wei, Jinlin and Yang, Wei and Yang, Min},
  journal={arXiv preprint arXiv:2308.09905},
  year={2023}
}
```

## Acknowledgement

A large part of the code is borrowed from [ByteTrack](https://github.com/ifzhang/ByteTrack) and [DiffusionDet](https://github.com/ShoufaChen/DiffusionDet). Thanks for their wonderful works!



================================================
FILE: diffusion/models/diffusion_head.py
================================================
import math
import random
from collections import namedtuple

import torch
import torch.nn.functional as F
from torch import nn
from torchvision.ops import nms,box_iou

from .diffusion_losses import SetCriterionDynamicK, HungarianMatcherDynamicK
from .diffusion_models import DynamicHead

from yolox.utils.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh
from yolox.utils import synchronize
from detectron2.layers import batched_nms
import time

ModelPrediction = namedtuple('ModelPrediction', ['pred_noise', 'pred_x_start'])


def exists(x):
    return x is not None


def default(val, d):
    if exists(val):
        return val
    return d() if callable(d) else d
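# e.g. default(None, lambda: 5) -> 5 and default(3, 5) -> 3.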


def extract(a, t, x_shape):
    """extract the appropriate  t  index for a batch of indices"""
    batch_size = t.shape[0]
    out = a.gather(-1, t)
    return out.reshape(batch_size, *((1,) * (len(x_shape) - 1)))


def cosine_beta_schedule(timesteps, s=0.008):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * math.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0, 0.999)
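
# Illustrative usage (comment only): one beta per timestep; DiffusionHead
# consumes these via cumulative products in its __init__ below, e.g.
#   betas = cosine_beta_schedule(1000)            # shape: (1000,)
#   alphas_cumprod = torch.cumprod(1. - betas, 0) # \bar{alpha}_t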

class DiffusionHead(nn.Module):
    """
    Implement DiffusionHead
    """

    def __init__(self,
                num_classes,
                width=1.0,
                strides=[8, 16, 32],
                num_proposals=500,
                num_heads=6,):
        super().__init__()
        self.device="cpu"
        self.dtype=torch.float32
        self.width=width
        self.num_classes = num_classes
        self.num_proposals = num_proposals
        # self.num_proposals = 512
        self.hidden_dim = int(256*width)
        self.num_heads = num_heads

        # build diffusion
        timesteps = 1000
        sampling_timesteps = 1
        self.objective = 'pred_x0'
        betas = cosine_beta_schedule(timesteps)
        alphas = 1. - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        alphas_cumprod_prev = F.pad(alphas_cumprod[:-1], (1, 0), value=1.)
        timesteps, = betas.shape
        self.num_timesteps = int(timesteps)

        # tracking setting
        self.inference_time_range=1
        self.track_candidate=1
        self.candidate_num_strategy=max

        self.sampling_timesteps = default(sampling_timesteps, timesteps)
        assert self.sampling_timesteps <= timesteps
        self.is_ddim_sampling = self.sampling_timesteps < timesteps
        self.ddim_sampling_eta = 1.
        self.self_condition = False
        self.scale = 2.0
        self.box_renewal = True
        self.use_ensemble = True

        self.register_buffer('betas', betas)
        self.register_buffer('alphas_cumprod', alphas_cumprod)
        self.register_buffer('alphas_cumprod_prev', alphas_cumprod_prev)

        # calculations for diffusion q(x_t | x_{t-1}) and others

        self.register_buffer('sqrt_alphas_cumprod', torch.sqrt(alphas_cumprod))
        self.register_buffer('sqrt_one_minus_alphas_cumprod', torch.sqrt(1. - alphas_cumprod))
        self.register_buffer('log_one_minus_alphas_cumprod', torch.log(1. - alphas_cumprod))
        self.register_buffer('sqrt_recip_alphas_cumprod', torch.sqrt(1. / alphas_cumprod))
        self.register_buffer('sqrt_recipm1_alphas_cumprod', torch.sqrt(1. / alphas_cumprod - 1))

        # calculations for posterior q(x_{t-1} | x_t, x_0)

        posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)

        # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t)

        self.register_buffer('posterior_variance', posterior_variance)

        # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain

        self.register_buffer('posterior_log_variance_clipped', torch.log(posterior_variance.clamp(min=1e-20)))
        self.register_buffer('posterior_mean_coef1', betas * torch.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))
        self.register_buffer('posterior_mean_coef2',
                             (1. - alphas_cumprod_prev) * torch.sqrt(alphas) / (1. - alphas_cumprod))

        # Build Dynamic Head.
        class_weight = 2.0
        giou_weight = 2.0
        l1_weight = 5.0
        no_object_weight =0.1
        self.deep_supervision = True
        self.use_focal = True
        self.use_fed_loss = False
        self.use_nms = False
        self.pooler_resolution=7
        self.noise_strategy="xywh"
   
        self.head = DynamicHead(num_classes,self.hidden_dim,self.pooler_resolution,strides,[self.hidden_dim]*len(strides),return_intermediate=self.deep_supervision,num_heads=self.num_heads,use_focal=self.use_focal,use_fed_loss=self.use_fed_loss)
        # Loss parameters:

        # Build Criterion.
        matcher = HungarianMatcherDynamicK(
            cost_class=class_weight, cost_bbox=l1_weight, cost_giou=giou_weight, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss
        )
        weight_dict = {"loss_ce": class_weight, "loss_bbox": l1_weight, "loss_giou": giou_weight}
        if self.deep_supervision:
            aux_weight_dict = {}
            for i in range(self.num_heads - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)

        losses = ["labels", "boxes"]

        self.criterion = SetCriterionDynamicK(
            num_classes=self.num_classes, matcher=matcher, weight_dict=weight_dict, eos_coef=no_object_weight,
            losses=losses, use_focal=self.use_focal,use_fed_loss=self.use_fed_loss)

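    # Invert the forward process x_t = sqrt(a_t) * x_0 + sqrt(1 - a_t) * eps,
    # where a_t = alphas_cumprod[t], to recover the predicted noise:
    #   eps = (sqrt(1 / a_t) * x_t - x_0) / sqrt(1 / a_t - 1)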
    def predict_noise_from_start(self, x_t, t, x0):
        return (
                (extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - x0) /
                extract(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)
        )

    def model_predictions(self, backbone_feats,images_whwh,x,t,lost_features=None,fix_bboxes=False,x_self_cond=None,clip_x_start=False):

        def prepare(x,images_whwh):
            x_boxes = torch.clamp(x, min=-1 * self.scale, max=self.scale)
            x_boxes = ((x_boxes / self.scale) + 1) / 2
            x_boxes = box_cxcywh_to_xyxy(x_boxes)
            x_boxes = x_boxes * images_whwh[:, None, :]
            return x_boxes
        
        def post(x_start,images_whwh):
            x_start = x_start / images_whwh[:, None, :]
            x_start = box_xyxy_to_cxcywh(x_start)
            x_start = (x_start * 2 - 1.) * self.scale
            x_start = torch.clamp(x_start, min=-1 * self.scale, max=self.scale)
            return x_start
        
        bs=len(x)//2
        bboxes=prepare(x,images_whwh=images_whwh)
        start_time=time.time()
        outputs_class, outputs_coord,outputs_score = self.head(backbone_feats,torch.split(bboxes,bs,dim=0),t,lost_features,fix_bboxes)
        end_time=time.time()

        x_start = outputs_coord[-1]  # (batch, num_proposals, 4) predict boxes: absolute coordinates (x1, y1, x2, y2)
        x_start=post(x_start,images_whwh=images_whwh)
        pred_noise = self.predict_noise_from_start(x,t,x_start)
        return ModelPrediction(pred_noise, x_start), outputs_class,outputs_coord,outputs_score,end_time-start_time
    
    @torch.no_grad()
    def new_ddim_sample(self,backbone_feats,images_whwh,ref_targets=None,dynamic_time=True,num_timesteps=1,num_proposals=500,inference_time_range=1,track_candidate=1,diffusion_t=200,clip_denoised=True):
        batch = images_whwh.shape[0]//2
        self.sampling_timesteps,self.num_proposals,self.track_candidate,self.inference_time_range=num_timesteps,num_proposals,track_candidate,inference_time_range
        shape = (batch, self.num_proposals, 4)
        cur_bboxes= torch.randn(shape,device=self.device,dtype=self.dtype)
        ref_t_list=[]
        track_t_list=[]
        total_time=0
        if ref_targets is None or self.track_candidate==0:
            ref_bboxes=torch.randn(shape, device=self.device)
            for i in range(batch):
                t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long()
                if dynamic_time:
                    ref_t,track_t=t[0],t[1]
                else:
                    ref_t,track_t=t[0],t[0]
                ref_t_list.append(ref_t)
                track_t_list.append(track_t)
        else:
            labels =ref_targets[..., :5]
            nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
            shape = (batch, self.num_proposals, 4)
            diffused_boxes = []
            cur_diffused_boxes=[]
            for batch_idx,num_gt in enumerate(nlabel):
                gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt])
                image_size_xyxy = images_whwh[batch_idx]
                gt_boxes = gt_bboxes_per_image  / image_size_xyxy
                # cxcywh
                gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
                # t = torch.randint(self.num_timesteps-self.inference_time_range, self.num_timesteps,(2,), device=self.device).long()
                # if dynamic_time:
                #     ref_t,track_t=t[0],t[1]
                # else:
                #     ref_t,track_t=t[0],t[0]
                if batch_idx==0:
                    ref_t=diffusion_t
                    track_t=diffusion_t
                else:
                    ref_t=diffusion_t
                    track_t=diffusion_t
                    self.track_candidate=4
                d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,ref_t)
                diffused_boxes.append(d_boxes)
                ref_t_list.append(ref_t)
                d_boxes,d_noise,ref_label= self.prepare_diffusion_concat(gt_boxes,track_t,ref_label)
                cur_diffused_boxes.append(d_boxes)
                track_t_list.append(track_t)
            ref_bboxes=torch.stack(diffused_boxes)
            cur_bboxes=torch.stack(cur_diffused_boxes)


        sampling_timesteps, eta= self.sampling_timesteps, self.ddim_sampling_eta

        def get_time_pairs(t,sampling_timesteps):
            # [-1, 0, 1, 2, ..., T-1] when sampling_timesteps == total_timesteps
            times = torch.linspace(-1, t - 1, steps=sampling_timesteps + 1)
            times = list(reversed(times.int().tolist()))
            time_pairs = list(zip(times[:-1], times[1:]))  # [(T-1, T-2), (T-2, T-3), ..., (1, 0), (0, -1)]
            return time_pairs
        
        ref_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in ref_t_list],device=self.device,dtype=torch.long)
        track_t_time_pairs_list=torch.tensor([get_time_pairs(t,sampling_timesteps) for t in track_t_list],device=self.device,dtype=torch.long)
        # (batch,sampling_timesteps,2)
        bboxes=torch.cat([ref_bboxes,cur_bboxes],dim=0)

        x_start = None
        # for (ref_time, ref_time_next),(cur_time, cur_time_next) in zip(ref_time_pairs,cur_time_pairs):
        for sampling_timestep in range(sampling_timesteps):
            is_last=sampling_timestep==(sampling_timesteps-1)

            ref_time_cond = ref_t_time_pairs_list[:,sampling_timestep,0]
            cur_time_cond = track_t_time_pairs_list[:,sampling_timestep,0]

            time_cond=torch.cat([ref_time_cond,cur_time_cond],dim=0)

            self_cond = x_start if self.self_condition else None

            preds, outputs_class, outputs_coord,outputs_score,association_time = self.model_predictions(backbone_feats,images_whwh,bboxes,time_cond,fix_bboxes=False,
                                                                         x_self_cond=self_cond, clip_x_start=clip_denoised)
            total_time+=association_time
            pred_noise, x_start = preds.pred_noise, preds.pred_x_start
                

            if is_last:
                bboxes = x_start
                continue

            if self.box_renewal:  # filter
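                # Box renewal: keep only proposals whose confidence clears the
                # threshold; discarded slots are refilled with fresh Gaussian
                # boxes inside `diffusion` below.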
                remain_list=[]
                pre_remain_bboxes=[]
                pre_remain_x_start=[]
                pre_remain_pred_noise=[]
                cur_remain_bboxes=[]
                cur_remain_x_start=[]
                cur_remain_pred_noise=[]
                for i in range(batch):
                    # if i==0:
                    #     remain_list.append(len(pred_noise[i,:,:]))
                    #     pre_remain_pred_noise.append(pred_noise[i,:,:])
                    #     cur_remain_pred_noise.append(pred_noise[i+batch,:,:])
                    #     pre_remain_x_start.append(x_start[i,:,:])
                    #     cur_remain_x_start.append(x_start[i+batch,:,:])
                    #     pre_remain_bboxes.append(bboxes[i,:,:])
                    #     cur_remain_bboxes.append(bboxes[i+batch,:,:])
                    # else:
                    threshold = 0.2
                    score_per_image = outputs_score[-1][i]
                    # pre_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i]))
                    # cur_score=torch.sqrt(score_per_image*torch.sigmoid(outputs_class[-1][i+batch]))
                    # value=((pre_score+cur_score)/2).flatten()
                    value, _ = torch.max(score_per_image, -1, keepdim=False)
                    keep_idx = value >=threshold
                    num_remain = torch.sum(keep_idx)
                    remain_list.append(num_remain)
                    pre_remain_pred_noise.append(pred_noise[i,keep_idx,:])
                    cur_remain_pred_noise.append(pred_noise[i+batch,keep_idx,:])
                    pre_remain_x_start.append(x_start[i,keep_idx,:])
                    cur_remain_x_start.append(x_start[i+batch,keep_idx,:])
                    pre_remain_bboxes.append(bboxes[i,keep_idx,:])
                    cur_remain_bboxes.append(bboxes[i+batch,keep_idx,:])
                x_start=pre_remain_x_start+cur_remain_x_start
                bboxes=pre_remain_bboxes+cur_remain_bboxes
                pred_noise=pre_remain_pred_noise+cur_remain_pred_noise

            def diffusion(sampling_times,bboxes,x_start,pred_noise):
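                # One DDIM step (Song et al., ICLR 2021):
                #   x_{t'} = sqrt(a_{t'}) * x_0 + sqrt(1 - a_{t'} - sigma^2) * eps + sigma * z
                # with sigma controlled by eta (eta = 1 here).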
                
                times,time_nexts=sampling_times[:,0],sampling_times[:,1]

                alpha = torch.tensor([self.alphas_cumprod[time] for time in times],dtype=self.dtype,device=self.device)
                alpha_next = torch.tensor([self.alphas_cumprod[time_next] for time_next in time_nexts],dtype=self.dtype,device=self.device)

                sigma = eta * ((1 - alpha / alpha_next) * (1 - alpha_next) / (1 - alpha)).sqrt()
                c = (1 - alpha_next - sigma ** 2).sqrt()

                if self.box_renewal:
                    for i in range(batch):
                        noise = torch.randn_like(bboxes[i])
                        bboxes[i] = x_start[i] * alpha_next[i].sqrt() + \
                            c[i] * pred_noise[i] + \
                            sigma[i] * noise
                        
                        bboxes[i] = torch.cat((bboxes[i], torch.randn(self.num_proposals - remain_list[i], 4, device=self.device)), dim=0)
                else:
                    noise = torch.randn_like(bboxes)

                    bboxes = x_start * alpha_next.sqrt()[:,None,None] + \
                        c[:,None,None] * pred_noise + \
                        sigma[:,None,None] * noise
                
                return bboxes
            
            bboxes[:batch]=diffusion(ref_t_time_pairs_list[:,sampling_timestep],bboxes[:batch],x_start[:batch],pred_noise[:batch])
            bboxes[batch:]=diffusion(track_t_time_pairs_list[:,sampling_timestep],bboxes[batch:],x_start[batch:],pred_noise[batch:])

            if self.box_renewal:
                bboxes=torch.stack(bboxes)

        box_cls = outputs_class[-1]
        box_pred = outputs_coord[-1]
        conf_score=outputs_score[-1]

        return torch.cat([box_pred.view(2*batch,-1,4),box_cls.view(2*batch,-1,1)],dim=-1),conf_score.view(batch,-1,1),total_time
    
    # forward diffusion
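    # q(x_t | x_0) = N( sqrt(alphas_cumprod[t]) * x_0, (1 - alphas_cumprod[t]) * I )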
    def q_sample(self, x_start, t, noise=None):
        if noise is None:
            noise = torch.randn_like(x_start)

        sqrt_alphas_cumprod_t = extract(self.sqrt_alphas_cumprod, t, x_start.shape)
        sqrt_one_minus_alphas_cumprod_t = extract(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)

        return sqrt_alphas_cumprod_t * x_start + sqrt_one_minus_alphas_cumprod_t * noise

    def forward(self,features,mate_info,targets=None):

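        # mate_info (i.e. meta info) is a (shape, device, dtype) tuple for the
        # input batch; frames come in reference/current pairs, so sizes below
        # use 2*b along the batch dimension.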
        mate_shape,mate_device,mate_dtype=mate_info
        self.device=mate_device
        self.dtype=mate_dtype
        b,_,h,w=mate_shape
        
        images_whwh=torch.tensor([w, h, w, h], dtype=self.dtype, device=self.device)[None,:].expand(2*b,4)
        if not self.training:
            results = self.new_ddim_sample(features,images_whwh,targets,dynamic_time=False)
            return results

        if self.training:
            targets, x_boxes, noises, t = self.prepare_targets(targets,images_whwh)
            t=t.squeeze(-1)
            # t[b:]=t[:b]
            x_boxes = x_boxes * images_whwh[:,None,:]
            pre_x_boxes,cur_x_boxes=torch.split(x_boxes,b,dim=0)

            outputs_class,outputs_coord,outputs_score = self.head(features,(pre_x_boxes,cur_x_boxes),t)
            output = {'pred_logits': outputs_class[-1], 'pred_boxes': outputs_coord[-1],'pred_scores':outputs_score[-1]}

            if self.deep_supervision:
                output['aux_outputs'] = [{'pred_logits': a, 'pred_boxes': b,'pred_scores': c}
                                         for a, b, c in zip(outputs_class[:-1], outputs_coord[:-1],outputs_score[:-1])]
            loss_dict = self.criterion(output, targets)
            weight_dict = self.criterion.weight_dict
            for k in loss_dict.keys():
                if k in weight_dict: 
                    loss_dict[k] *= weight_dict[k]
            return loss_dict
 
    def prepare_diffusion_repeat(self,gt_boxes,t,ref_repeat_tensor=None):
        """
        :param gt_boxes: (cx, cy, w, h), normalized
        :param num_proposals:
        """
        t = torch.full((1,),t,device=self.device).long()

        noise = torch.randn(self.num_proposals,4,device=self.device,dtype=self.dtype)

        num_gt = gt_boxes.shape[0]
        if not num_gt:  # generate fake gt boxes if empty gt boxes
            gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device)
            num_gt = 1

        num_repeat = self.num_proposals // num_gt  # number of repeat except the last gt box in one image
        repeat_tensor = [num_repeat] * (num_gt - self.num_proposals % num_gt) + [num_repeat + 1] * (
                self.num_proposals % num_gt)
        assert sum(repeat_tensor) == self.num_proposals
        random.shuffle(repeat_tensor)
        repeat_tensor = torch.tensor(repeat_tensor, device=self.device)
        if ref_repeat_tensor is not None:
            repeat_tensor=ref_repeat_tensor

        gt_boxes = (gt_boxes * 2. - 1.) * self.scale
        x_start = torch.repeat_interleave(gt_boxes, repeat_tensor, dim=0)

        if self.noise_strategy=="xy":
            noise[:,2:]=0
        # noise sample
        x = self.q_sample(x_start=x_start, t=t, noise=noise)

        if self.training:
            x = torch.clamp(x, min=-1 * self.scale, max=self.scale)
            x = ((x / self.scale) + 1) / 2.

            diff_boxes = box_cxcywh_to_xyxy(x)
        else:
            diff_boxes=x

        return diff_boxes,noise,repeat_tensor

    def prepare_diffusion_concat(self,gt_boxes,t,ref_mask=None):
        """
        :param gt_boxes: (cx, cy, w, h), normalized
        :param num_proposals:
        """
        if self.training:
            self.track_candidate=1
        t = torch.full((1,),t,device=self.device).long()
        noise = torch.randn(self.num_proposals, 4, device=self.device,dtype=self.dtype)
        select_mask=None
        num_gt = gt_boxes.shape[0]*self.track_candidate
        if not num_gt:  # generate fake gt boxes if empty gt boxes
            gt_boxes = torch.as_tensor([[0.5, 0.5, 1., 1.]], dtype=self.dtype, device=self.device)
            num_gt = 1
        else:
            gt_boxes=torch.repeat_interleave(gt_boxes,torch.tensor([self.track_candidate]*gt_boxes.shape[0],device=self.device),dim=0)
        if num_gt < self.num_proposals:
            box_placeholder = torch.randn(self.num_proposals - num_gt, 4,
                                          device=self.device,dtype=self.dtype) / 6. + 0.5  # 3sigma = 1/2 --> sigma: 1/6
            # box_placeholder=torch.clip(torch.poisson(torch.clip(box_placeholder*5,min=0)),min=1,max=10)/10
            # box_placeholder=torch.nn.init.uniform_(box_placeholder, a=0, b=1)
            # box_placeholder=torch.ones_like(box_placeholder)
            # box_placeholder[:,:2]=box_placeholder[:,:2]/2
            box_placeholder[:, 2:4] = torch.clip(box_placeholder[:, 2:4], min=1e-4)
            x_start = torch.cat((gt_boxes, box_placeholder), dim=0)
        elif num_gt > self.num_proposals:
            select_mask = [True] * self.num_proposals + [False] * (num_gt - self.num_proposals)
            random.shuffle(select_mask)
            if ref_mask is not None:
                select_mask=ref_mask
            x_start = gt_boxes[select_mask]
        else:
            x_start = gt_boxes

        x_start = (x_start * 2. - 1.) * self.scale

        if self.noise_strategy=="xy":
            noise[:,2:]=0
        # noise sample
        x = self.q_sample(x_start=x_start, t=t, noise=noise)

        if self.training:
            # x=x_start

            x = torch.clamp(x, min=-1 * self.scale, max=self.scale)
            x = ((x / self.scale) + 1) / 2.

            diff_boxes = box_cxcywh_to_xyxy(x)
        else:
            diff_boxes = x

        return diff_boxes, noise, select_mask

    def prepare_targets(self,targets,images_whwh):
        labels = targets[..., :5]
        nlabel = (labels.sum(dim=2) > 0).sum(dim=1)  # number of objects
        new_targets = []
        diffused_boxes = []
        noises = []
        ts = []
        select_mask={}
        # select_t={}
        # select_gt_boxes={}
        for batch_idx,num_gt in enumerate(nlabel):
            target = {}
            gt_bboxes_per_image = box_cxcywh_to_xyxy(labels[batch_idx, :num_gt, 1:5])
            gt_classes = labels[batch_idx, :num_gt, 0]
            image_size_xyxy = images_whwh[batch_idx]
            gt_boxes = gt_bboxes_per_image / image_size_xyxy
            # normalized xyxy -> normalized cxcywh
            gt_boxes = box_xyxy_to_cxcywh(gt_boxes)
            x_gt_boxes=gt_boxes
            d_t = torch.randint(0, self.num_timesteps, (1,), device=self.device).long()[0]
            ## baseline setting
            # if batch_idx<len(nlabel)//2:
            #     d_t = torch.randint(0, 40, (1,), device=self.device).long()[0]
            # else:
            #     d_t = torch.randint(0, self.num_timesteps, (1,), device=self.device).long()[0]
            # if select_t.get(batch_idx%(len(nlabel)//2),None) is not None:
            #     d_t=select_t.get(batch_idx%(len(nlabel)//2),None)
            # if select_gt_boxes.get(batch_idx%(len(nlabel)//2),None) is not None:
            #     x_gt_boxes=select_gt_boxes.get(batch_idx%(len(nlabel)//2),None)    
            # frames are batched as [pre..., cur...]; keying by batch_idx % (B//2)
            # makes both frames of a pair share the same gt selection mask
            d_boxes, d_noise, d_mask = self.prepare_diffusion_concat(x_gt_boxes, d_t, select_mask.get(batch_idx % (len(nlabel) // 2), None))
            if d_mask is not None:
                select_mask[batch_idx%(len(nlabel)//2)]=d_mask
            # if d_t is not None:
            #     select_t[batch_idx%(len(nlabel)//2)]=d_t
            # if select_gt_boxes.get(batch_idx%(len(nlabel)//2),None) is None:
            #     select_gt_boxes[batch_idx%(len(nlabel)//2)]=gt_boxes 
            diffused_boxes.append(d_boxes)
            noises.append(d_noise)
            ts.append(d_t)
            target["labels"] = gt_classes.long()
            target["boxes"] = gt_boxes
            target["boxes_xyxy"] = gt_bboxes_per_image
            target["image_size_xyxy"] = image_size_xyxy
            image_size_xyxy_tgt = image_size_xyxy.unsqueeze(0).repeat(len(gt_boxes), 1)
            target["image_size_xyxy_tgt"] = image_size_xyxy_tgt
            new_targets.append(target)

        return new_targets, torch.stack(diffused_boxes), torch.stack(noises), torch.stack(ts)
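

def _demo_box_normalization():
    # Illustrative, self-contained sketch (hypothetical helper) of the
    # normalization used in prepare_targets above: absolute xyxy boxes are
    # divided by the image (w, h, w, h) and converted to cxcywh before diffusion.
    import torch

    def xyxy_to_cxcywh(b):
        x1, y1, x2, y2 = b.unbind(-1)
        return torch.stack([(x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1], dim=-1)

    images_whwh = torch.tensor([640., 480., 640., 480.])
    gt_xyxy = torch.tensor([[320., 240., 480., 360.]])
    gt_cxcywh = xyxy_to_cxcywh(gt_xyxy / images_whwh)
    return gt_cxcywh  # center (0.625, 0.625), size (0.25, 0.25)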





================================================
FILE: diffusion/models/diffusion_losses.py
================================================
import torch
import torch.nn.functional as F
from torch import nn
from fvcore.nn import sigmoid_focal_loss_jit
import torchvision.ops as ops
from yolox.utils import box_ops
from yolox.utils.dist import get_world_size, is_dist_avail_and_initialized
from yolox.utils.box_ops import box_cxcywh_to_xyxy, box_xyxy_to_cxcywh, generalized_box_iou


class SetCriterionDynamicK(nn.Module):
    """ This class computes the loss for DiffusionTrack (adapted from DiffusionDet's SetCriterionDynamicK).
    The process happens in two steps:
        1) we compute hungarian assignment between ground truth boxes and the outputs of the model
        2) we supervise each pair of matched ground-truth / prediction (supervise class and box)
    """
    def __init__(self,num_classes, matcher, weight_dict, eos_coef, losses, use_focal,use_fed_loss):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        self.use_focal = use_focal
        self.use_fed_loss = use_fed_loss
        if self.use_fed_loss:
            self.fed_loss_num_classes = 50
            from detectron2.data.detection_utils import get_fed_loss_cls_weights
            # NOTE: `cfg` is not defined in this module, so this detectron2-style
            # branch raises a NameError unless a config object is supplied by the caller.
            cls_weight_fun = lambda: get_fed_loss_cls_weights(dataset_names=cfg.DATASETS.TRAIN, freq_weight_power=cfg.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER)  # noqa
            fed_loss_cls_weights = cls_weight_fun()
            assert (
                    len(fed_loss_cls_weights) == self.num_classes
            ), "Please check the provided fed_loss_cls_weights. Their size should match num_classes"
            self.register_buffer("fed_loss_cls_weights", fed_loss_cls_weights)

        if self.use_focal:
            self.focal_loss_alpha = 0.25
            self.focal_loss_gamma = 2.0
        else:
            empty_weight = torch.ones(self.num_classes + 1)
            empty_weight[-1] = self.eos_coef
            self.register_buffer('empty_weight', empty_weight)

    # copy-paste from https://github.com/facebookresearch/detectron2/blob/main/detectron2/modeling/roi_heads/fast_rcnn.py#L356
    def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_classes, weight):
        """
        Args:
            gt_classes: a long tensor of shape R that contains the gt class label of each proposal.
            num_fed_loss_classes: minimum number of classes to keep when calculating federated loss.
                Will sample negative classes if the number of unique gt_classes is smaller than this value.
            num_classes: number of foreground classes
            weight: probabilities used to sample negative classes
        Returns:
            Tensor:
                classes to keep when calculating the federated loss, including both unique gt
                classes and sampled negative classes.
        """
        unique_gt_classes = torch.unique(gt_classes)
        prob = unique_gt_classes.new_ones(num_classes + 1).float()
        prob[-1] = 0
        if len(unique_gt_classes) < num_fed_loss_classes:
            prob[:num_classes] = weight.float().clone()
            prob[unique_gt_classes] = 0
            sampled_negative_classes = torch.multinomial(
                prob, num_fed_loss_classes - len(unique_gt_classes), replacement=False
            )
            fed_loss_classes = torch.cat([unique_gt_classes, sampled_negative_classes])
        else:
            fed_loss_classes = unique_gt_classes
        return fed_loss_classes
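
    @staticmethod
    def _demo_fed_loss_sampling():
        # Illustrative sketch only (hypothetical helper): with 2 unique gt
        # classes and a budget of 4, get_fed_loss_classes pads the set with 2
        # sampled negative classes (uniform weights assumed here), never
        # duplicating the gt classes or the background slot.
        import torch
        gt_classes = torch.tensor([3, 3, 7])
        num_classes, budget = 10, 4
        unique_gt = torch.unique(gt_classes)
        prob = torch.ones(num_classes + 1)
        prob[-1] = 0          # never sample the background slot
        prob[unique_gt] = 0   # never re-sample a gt class
        negatives = torch.multinomial(prob, budget - len(unique_gt), replacement=False)
        return torch.cat([unique_gt, negatives])  # 4 classes, gt first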

    def loss_labels(self, outputs, targets, indices, num_boxes, log=False):
        """Classification loss (sigmoid focal or BCE), fused with the association confidence.
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']
        # fuse class logits with the frame-pair association confidence via a
        # geometric mean in probability space, then map back to logit space
        conf_score = torch.cat([outputs['pred_scores'], outputs['pred_scores']], dim=0)
        p = torch.sqrt(torch.sigmoid(src_logits) * conf_score)
        src_logits = torch.log(p / (1 - p))
        batch_size = len(targets)

        # src_logits_re=torch.cat((src_logits[:batch_size//2],src_logits[batch_size//2:]),dim=0)
        # src_logits=(src_logits+src_logits_re)/2

        # idx = self._get_src_permutation_idx(indices)
        # target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        target_classes = torch.full(src_logits.shape[:2], self.num_classes,
                                    dtype=torch.int64, device=src_logits.device)
        # src_logits_list = []
        target_classes_o_list = []
        # target_classes[idx] = target_classes_o
        for batch_idx in range(batch_size):
            # matching is computed once per frame pair, so both halves of the
            # batch (previous / current frame) reuse the same indices
            valid_query = indices[batch_idx % (batch_size // 2)][0]
            gt_multi_idx = indices[batch_idx % (batch_size // 2)][1]
            if len(gt_multi_idx) == 0:
                continue
            # bz_src_logits = src_logits[batch_idx]
            target_classes_o = targets[batch_idx]["labels"]
            target_classes[batch_idx, valid_query] = target_classes_o[gt_multi_idx]

            # src_logits_list.append(bz_src_logits[valid_query])
            target_classes_o_list.append(target_classes_o[gt_multi_idx])

        if self.use_focal or self.use_fed_loss:
            num_boxes = torch.cat(target_classes_o_list).shape[0] if len(target_classes_o_list) != 0 else 1

            target_classes_onehot = torch.zeros([src_logits.shape[0], src_logits.shape[1], self.num_classes + 1],
                                                dtype=src_logits.dtype, layout=src_logits.layout,
                                                device=src_logits.device)
            target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
            loss_ce=0
            gt_classes = torch.argmax(target_classes_onehot, dim=-1)
            target_classes_onehot = target_classes_onehot[:, :, :-1]
            target_classes_onehot = target_classes_onehot.flatten(0, 1)
            src_logits = src_logits.flatten(0, 1)
            if self.use_focal:
                cls_loss = sigmoid_focal_loss_jit(src_logits, target_classes_onehot, alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="none")
            else:
                cls_loss = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction="none")
            if self.use_fed_loss:
                K = self.num_classes
                N = src_logits.shape[0]
                fed_loss_classes = self.get_fed_loss_classes(
                    gt_classes,
                    num_fed_loss_classes=self.fed_loss_num_classes,
                    num_classes=K,
                    weight=self.fed_loss_cls_weights,
                )
                fed_loss_classes_mask = fed_loss_classes.new_zeros(K + 1)
                fed_loss_classes_mask[fed_loss_classes] = 1
                fed_loss_classes_mask = fed_loss_classes_mask[:K]
                weight = fed_loss_classes_mask.view(1, K).expand(N, K).float()

                loss_ce += torch.sum(cls_loss * weight) / num_boxes
            else:
                loss_ce += torch.sum(cls_loss) / num_boxes

            losses = {'loss_ce': loss_ce}
        else:
            raise NotImplementedError

        return losses
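
    @staticmethod
    def _demo_logit_fusion():
        # Illustrative numeric check (hypothetical helper) of the fusion at the
        # top of loss_labels: class probability and association confidence are
        # combined by a geometric mean, then mapped back to logit space.
        import torch
        logit, conf = torch.tensor(2.0), torch.tensor(0.9)
        p = torch.sqrt(torch.sigmoid(logit) * conf)  # ~0.890
        fused_logit = torch.log(p / (1 - p))         # ~2.09
        return fused_logit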

    def loss_boxes(self, outputs, targets, indices, num_boxes):
        """Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss
           targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]
           The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size.
        """
        assert 'pred_boxes' in outputs
        # idx = self._get_src_permutation_idx(indices)
        src_boxes = outputs['pred_boxes']

        batch_size = len(targets)
        pred_box_list = []
        pred_norm_box_list = []
        tgt_box_list = []
        tgt_box_xyxy_list = []
        for batch_idx in range(batch_size):
            valid_query = indices[batch_idx%(batch_size//2)][0]
            gt_multi_idx = indices[batch_idx%(batch_size//2)][1]
            if len(gt_multi_idx) == 0:
                continue
            bz_image_whwh = targets[batch_idx]['image_size_xyxy']
            bz_src_boxes = src_boxes[batch_idx]
            bz_target_boxes = targets[batch_idx]["boxes"]  # normalized (cx, cy, w, h)
            bz_target_boxes_xyxy = targets[batch_idx]["boxes_xyxy"]  # absolute (x1, y1, x2, y2)
            pred_box_list.append(bz_src_boxes[valid_query])
            pred_norm_box_list.append(bz_src_boxes[valid_query] / bz_image_whwh)  # normalize (x1, y1, x2, y2)
            tgt_box_list.append(bz_target_boxes[gt_multi_idx])
            tgt_box_xyxy_list.append(bz_target_boxes_xyxy[gt_multi_idx])

        if len(pred_box_list) != 0:
            src_boxes = torch.cat(pred_box_list)
            # rotate the per-image lists by half the batch so every entry is
            # paired with its counterpart from the other frame
            src_boxes_re = torch.cat(pred_box_list[-len(pred_box_list) // 2:] + pred_box_list[:len(pred_box_list) // 2])
            src_boxes_norm = torch.cat(pred_norm_box_list)  # normalized (x1, y1, x2, y2)
            target_boxes = torch.cat(tgt_box_list)
            target_boxes_abs_xyxy = torch.cat(tgt_box_xyxy_list)
            target_boxes_abs_xyxy_re=torch.cat(tgt_box_xyxy_list[-len(tgt_box_xyxy_list)//2:]+tgt_box_xyxy_list[:len(tgt_box_xyxy_list)//2])
            num_boxes = src_boxes.shape[0]
            losses = {}
            # require normalized (x1, y1, x2, y2)
            loss_bbox = F.l1_loss(src_boxes_norm, box_cxcywh_to_xyxy(target_boxes), reduction='none')
            losses['loss_bbox'] = loss_bbox.sum() / num_boxes

            # loss_giou = giou_loss(box_ops.box_cxcywh_to_xyxy(src_boxes), box_ops.box_cxcywh_to_xyxy(target_boxes))
            loss_giou = 1 - torch.diag(box_ops.generalized_box_iou(src_boxes,src_boxes_re,target_boxes_abs_xyxy,target_boxes_abs_xyxy_re))
            losses['loss_giou'] = loss_giou.sum() / num_boxes
        else:
            losses = {'loss_bbox': outputs['pred_boxes'].sum() * 0,
                      'loss_giou': outputs['pred_boxes'].sum() * 0}

        return losses
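
    @staticmethod
    def _demo_half_rotation_pairing():
        # Illustrative sketch only (hypothetical helper): the `_re` tensors in
        # loss_boxes rotate the per-image list by half its length, so for a
        # batch [pre_0, pre_1, cur_0, cur_1] each entry is paired with its
        # counterpart from the other frame.
        entries = ["pre_0", "pre_1", "cur_0", "cur_1"]
        rotated = entries[-len(entries) // 2:] + entries[:len(entries) // 2]
        assert rotated == ["cur_0", "cur_1", "pre_0", "pre_1"]
        return rotated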

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
        loss_map = {
            'labels': self.loss_labels,
            'boxes': self.loss_boxes,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_boxes, **kwargs)

    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}

        # Retrieve the matching between the outputs of the last layer and the targets
        indices, _ = self.matcher(outputs_without_aux, targets)

        # Compute the average number of target boxes across all nodes, for normalization purposes.
        # Each object appears in both frames of a pair, hence the division by 2.
        num_boxes = sum(len(t["labels"]) for t in targets) // 2
        num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_boxes)
        num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()

        # Compute all the requested losses
        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes))

        # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
        if 'aux_outputs' in outputs:
            for i, aux_outputs in enumerate(outputs['aux_outputs']):
                indices, _ = self.matcher(aux_outputs, targets)
                for loss in self.losses:
                    if loss == 'masks':
                        # Intermediate masks losses are too costly to compute, we ignore them.
                        continue
                    kwargs = {}
                    if loss == 'labels':
                        # Logging is enabled only for the last layer
                        kwargs = {'log': False}
                    l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)
                    l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
                    losses.update(l_dict)

        return losses


class HungarianMatcherDynamicK(nn.Module):
    """This class computes an assignment between the targets and the predictions of the network
    For efficiency reasons, the targets don't include the no_object. Because of this, in general,
    there are more predictions than targets. In this case, we do a 1-to-k (dynamic) matching of the best predictions,
    while the others are un-matched (and thus treated as non-objects).
    """
    def __init__(self,  cost_class: float = 1, cost_bbox: float = 1, cost_giou: float = 1, cost_mask: float = 1, use_focal: bool = False,use_fed_loss=False):
        """Creates the matcher
        Params:
            cost_class: This is the relative weight of the classification error in the matching cost
            cost_bbox: This is the relative weight of the L1 error of the bounding box coordinates in the matching cost
            cost_giou: This is the relative weight of the giou loss of the bounding box in the matching cost
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_bbox = cost_bbox
        self.cost_giou = cost_giou
        self.use_focal = use_focal
        self.use_fed_loss = use_fed_loss
        self.ota_k = 5
        if self.use_focal:
            self.focal_loss_alpha = 0.25
            self.focal_loss_gamma = 2.0
        assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, "all costs can't be 0"

    def forward(self, outputs, targets):
        """SimOTA-style dynamic matching over paired (previous, current) frame proposals."""
        with torch.no_grad():
            bs, num_queries = outputs["pred_logits"].shape[:2]
            conf_score=outputs["pred_scores"]
            # We flatten to compute the cost matrices in a batch
            pred_logits_pre,pred_logits_curr=torch.split(outputs["pred_logits"],bs//2,dim=0)
            out_bbox_pre,out_bbox_curr = torch.split(outputs["pred_boxes"],bs//2,dim=0)
            if self.use_focal or self.use_fed_loss:
                out_prob_pre = torch.sqrt(pred_logits_pre.sigmoid()*conf_score)  # [batch_size, num_queries, num_classes]
                out_prob_curr = torch.sqrt(pred_logits_curr.sigmoid()*conf_score)
            else:
                out_prob_pre = torch.sqrt(pred_logits_pre.softmax(-1)*conf_score) # [batch_size, num_queries, num_classes]
                out_prob_curr=torch.sqrt(pred_logits_curr.softmax(-1)*conf_score)
            indices = []
            matched_ids = []
            assert bs == len(targets)
            for batch_idx in range(bs//2):
                bz_boxes_pre = out_bbox_pre[batch_idx]  # [num_proposals, 4]
                bz_out_prob_pre = out_prob_pre[batch_idx]
                bz_boxes_curr = out_bbox_curr[batch_idx]  # [num_proposals, 4]
                bz_out_prob_curr = out_prob_curr[batch_idx]
                bz_tgt_ids_pre = targets[batch_idx]["labels"]
                bz_tgt_ids_curr = targets[batch_idx+bs//2]["labels"]
                num_insts = len(bz_tgt_ids_pre)
                assert len(bz_tgt_ids_curr) == num_insts, "instance count mismatch between frames: {} vs {}".format(len(bz_tgt_ids_curr), num_insts)
                if num_insts == 0:  # empty object in key frame
                    non_valid = torch.zeros(bz_out_prob_pre.shape[0]).to(bz_out_prob_pre) > 0
                    indices_batchi = (non_valid, torch.arange(0, 0).to(bz_out_prob_pre))
                    matched_qidx = torch.arange(0, 0).to(bz_out_prob_pre)
                    indices.append(indices_batchi)
                    matched_ids.append(matched_qidx)
                    continue

                bz_gtboxs_pre = targets[batch_idx]['boxes']  # [num_gt, 4] normalized (cx, cy, w, h)
                bz_gtboxs_abs_xyxy_pre = targets[batch_idx]['boxes_xyxy']
                bz_gtboxs_curr = targets[batch_idx+bs//2]['boxes']  # [num_gt, 4] normalized (cx, cy, w, h)
                bz_gtboxs_abs_xyxy_curr = targets[batch_idx+bs//2]['boxes_xyxy']
                fg_mask_pre, is_in_boxes_and_center_pre = self.get_in_boxes_info(
                    box_xyxy_to_cxcywh(bz_boxes_pre),  # absolute (cx, cy, w, h)
                    box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_pre),  # absolute (cx, cy, w, h)
                    expanded_strides=32
                )
                fg_mask_curr, is_in_boxes_and_center_curr = self.get_in_boxes_info(
                    box_xyxy_to_cxcywh(bz_boxes_curr),  # absolute (cx, cy, w, h)
                    box_xyxy_to_cxcywh(bz_gtboxs_abs_xyxy_curr),  # absolute (cx, cy, w, h)
                    expanded_strides=32
                )
                # a proposal counts as foreground only if it qualifies in both frames
                fg_mask = fg_mask_pre & fg_mask_curr
                is_in_boxes_and_center = is_in_boxes_and_center_pre & is_in_boxes_and_center_curr

                pair_wise_ious_pre = ops.box_iou(bz_boxes_pre, bz_gtboxs_abs_xyxy_pre)
                pair_wise_ious_curr = ops.box_iou(bz_boxes_curr, bz_gtboxs_abs_xyxy_curr)
                pair_wise_ious=(pair_wise_ious_pre+pair_wise_ious_curr)/2
                cost_class=0
                bz_out_prob_set=[bz_out_prob_pre,bz_out_prob_curr]
                bz_tgt_ids_set=[bz_tgt_ids_pre,bz_tgt_ids_curr]
                # Compute the classification cost.
                if self.use_focal:
                    alpha = self.focal_loss_alpha
                    gamma = self.focal_loss_gamma
                    for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):
                        neg_cost_class = (1 - alpha) * (bz_out_prob ** gamma) * (-(1 - bz_out_prob + 1e-8).log())
                        pos_cost_class = alpha * ((1 - bz_out_prob) ** gamma) * (-(bz_out_prob + 1e-8).log())
                        cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]
                elif self.use_fed_loss:
                    # focal loss degenerates to naive one
                    for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):
                        neg_cost_class = (-(1 - bz_out_prob + 1e-8).log())
                        pos_cost_class = (-(bz_out_prob + 1e-8).log())
                        cost_class += pos_cost_class[:, bz_tgt_ids] - neg_cost_class[:, bz_tgt_ids]
                else:
                    for bz_out_prob,bz_tgt_ids in zip(bz_out_prob_set,bz_tgt_ids_set):
                        cost_class += -bz_out_prob[:, bz_tgt_ids]

                # Compute the L1 cost between boxes
                # image_size_out = torch.cat([v["image_size_xyxy"].unsqueeze(0) for v in targets])
                # image_size_out = image_size_out.unsqueeze(1).repeat(1, num_queries, 1).flatten(0, 1)
                # image_size_tgt = torch.cat([v["image_size_xyxy_tgt"] for v in targets])

                bz_image_size_out_pre = targets[batch_idx]['image_size_xyxy']
                bz_image_size_tgt_pre = targets[batch_idx]['image_size_xyxy_tgt']
                bz_image_size_out_curr = targets[batch_idx+bs//2]['image_size_xyxy']
                bz_image_size_tgt_curr = targets[batch_idx+bs//2]['image_size_xyxy_tgt']

                bz_out_bbox_pre = bz_boxes_pre / bz_image_size_out_pre  # normalize (x1, y1, x2, y2)
                bz_out_bbox_curr = bz_boxes_curr / bz_image_size_out_curr  # normalize (x1, y1, x2, y2)
                bz_tgt_bbox_pre = bz_gtboxs_abs_xyxy_pre / bz_image_size_tgt_pre  # normalize (x1, y1, x2, y2)
                bz_tgt_bbox_curr = bz_gtboxs_abs_xyxy_curr / bz_image_size_tgt_curr  # normalize (x1, y1, x2, y2)
                cost_bbox_pre = torch.cdist(bz_out_bbox_pre, bz_tgt_bbox_pre, p=1)
                cost_bbox_curr = torch.cdist(bz_out_bbox_curr, bz_tgt_bbox_curr, p=1)

                # joint GIoU cost over the (pre, curr) box pairs (the repo's 4-argument variant)
                cost_giou = -generalized_box_iou(bz_boxes_pre,bz_boxes_curr,bz_gtboxs_abs_xyxy_pre,bz_gtboxs_abs_xyxy_curr)

                # Final cost matrix
                cost = self.cost_bbox * (cost_bbox_pre+cost_bbox_curr)/2 + self.cost_class * cost_class/2 + self.cost_giou * cost_giou + 100.0 * (~is_in_boxes_and_center)
                assert not torch.any(torch.isnan(cost)),"Error nan value occurs"
                # cost = (cost_class + 3.0 * cost_giou + 100.0 * (~is_in_boxes_and_center))  # [num_query,num_gt]
                cost[~fg_mask] = cost[~fg_mask] + 10000.0

                # if bz_gtboxs.shape[0]>0:
                indices_batchi, matched_qidx = self.dynamic_k_matching(cost, pair_wise_ious, bz_gtboxs_pre.shape[0])

                indices.append(indices_batchi)
                matched_ids.append(matched_qidx)

        return indices, matched_ids

    def get_in_boxes_info(self, boxes, target_gts, expanded_strides):
        xy_target_gts = box_cxcywh_to_xyxy(target_gts)  # (x1, y1, x2, y2)

        anchor_center_x = boxes[:, 0].unsqueeze(1)
        anchor_center_y = boxes[:, 1].unsqueeze(1)

        # whether the center of each anchor is inside a gt box
        b_l = anchor_center_x > xy_target_gts[:, 0].unsqueeze(0)
        b_r = anchor_center_x < xy_target_gts[:, 2].unsqueeze(0)
        b_t = anchor_center_y > xy_target_gts[:, 1].unsqueeze(0)
        b_b = anchor_center_y < xy_target_gts[:, 3].unsqueeze(0)
        # (b_l.long()+b_r.long()+b_t.long()+b_b.long())==4 [300,num_gt] ,
        is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)
        is_in_boxes_all = is_in_boxes.sum(1) > 0  # [num_query]
        # in fixed center
        center_radius = 2.5
        # Modified to self-adapted sampling --- the center size depends on the size of the gt boxes
        # https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212
        b_l = anchor_center_x > (target_gts[:, 0] - (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
        b_r = anchor_center_x < (target_gts[:, 0] + (center_radius * (xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
        b_t = anchor_center_y > (target_gts[:, 1] - (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)
        b_b = anchor_center_y < (target_gts[:, 1] + (center_radius * (xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)

        is_in_centers = ((b_l.long() + b_r.long() + b_t.long() + b_b.long()) == 4)
        is_in_centers_all = is_in_centers.sum(1) > 0

        is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
        is_in_boxes_and_center = (is_in_boxes & is_in_centers)

        return is_in_boxes_anchor, is_in_boxes_and_center

    def dynamic_k_matching(self, cost, pair_wise_ious, num_gt):
        matching_matrix = torch.zeros_like(cost)  # [300,num_gt]
        ious_in_boxes_matrix = pair_wise_ious
        n_candidate_k = self.ota_k

        # dynamic_k for each gt is the (truncated) sum of its top `ota_k` IoUs over all proposals, clamped to at least 1
        topk_ious, _ = torch.topk(ious_in_boxes_matrix, n_candidate_k, dim=0)
        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)

        for gt_idx in range(num_gt):
            _, pos_idx = torch.topk(cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False)
            matching_matrix[:, gt_idx][pos_idx] = 1.0

        del topk_ious, dynamic_ks, pos_idx

        anchor_matching_gt = matching_matrix.sum(1)

        if (anchor_matching_gt > 1).sum() > 0:
            _, cost_argmin = torch.min(cost[anchor_matching_gt > 1], dim=1)
            matching_matrix[anchor_matching_gt > 1] *= 0
            matching_matrix[anchor_matching_gt > 1, cost_argmin] = 1

        while (matching_matrix.sum(0) == 0).any():
            num_zero_gt = (matching_matrix.sum(0) == 0).sum()
            matched_query_id = matching_matrix.sum(1) > 0
            cost[matched_query_id] += 100000.0
            unmatch_id = torch.nonzero(matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)
            for gt_idx in unmatch_id:
                pos_idx = torch.argmin(cost[:, gt_idx])
                matching_matrix[:, gt_idx][pos_idx] = 1.0
            if (matching_matrix.sum(1) > 1).sum() > 0:  # if a query matches more than one gt
                anchor_matching_gt = matching_matrix.sum(1)  # refresh: the pre-loop value is stale here
                _, cost_argmin = torch.min(cost[anchor_matching_gt > 1],
                                           dim=1)  # find the gt with minimal cost for these queries
                matching_matrix[anchor_matching_gt > 1] *= 0  # reset mapping relationship
                matching_matrix[anchor_matching_gt > 1, cost_argmin] = 1  # keep only the gt with minimal cost

        assert not (matching_matrix.sum(0) == 0).any()
        selected_query = matching_matrix.sum(1) > 0
        gt_indices = matching_matrix[selected_query].max(1)[1]
        assert selected_query.sum() == len(gt_indices)

        cost[matching_matrix == 0] = cost[matching_matrix == 0] + float('inf')
        matched_query_id = torch.min(cost, dim=0)[1]

        return (selected_query, gt_indices), matched_query_id
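

def _demo_dynamic_k():
    # Illustrative toy run (hypothetical helper) of the dynamic-k rule above:
    # each gt's k is the truncated sum of its top `ota_k` IoUs (clamped to
    # >= 1), and that many lowest-cost proposals are kept for it.
    import torch
    ious = torch.tensor([[0.8], [0.7], [0.1], [0.0]])  # 4 proposals, 1 gt
    cost = torch.tensor([[0.2], [0.1], [0.9], [1.5]])
    topk_ious, _ = torch.topk(ious, k=3, dim=0)
    dynamic_k = torch.clamp(topk_ious.sum(0).int(), min=1)  # int(1.6) -> 1
    _, pos_idx = torch.topk(cost[:, 0], k=int(dynamic_k[0]), largest=False)
    return pos_idx  # proposal 1, the cheapest match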


================================================
FILE: diffusion/models/diffusion_models.py
================================================
import copy
import math

import numpy as np
import torch
from torch import einsum, nn
import torch.nn.functional as F

from einops import rearrange, repeat
from einops_exts import rearrange_many
from detectron2.modeling.poolers import ROIPooler
from detectron2.structures import Boxes


def exists(val):
    return val is not None


_DEFAULT_SCALE_CLAMP = math.log(100000.0 / 16)



class SinusoidalPositionEmbeddings(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim

    def forward(self, time):
        device = time.device
        half_dim = self.dim // 2
        embeddings = math.log(10000) / (half_dim - 1)
        embeddings = torch.exp(torch.arange(half_dim, device=device) * -embeddings)
        embeddings = time[:, None] * embeddings[None, :]
        embeddings = torch.cat((embeddings.sin(), embeddings.cos()), dim=-1)
        return embeddings
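

def _demo_sinusoidal_embeddings():
    # Illustrative shape check (hypothetical helper): a batch of 4 timesteps
    # embedded into 128 dims (64 sine + 64 cosine frequencies).
    import torch
    t = torch.tensor([0., 1., 10., 100.])
    emb = SinusoidalPositionEmbeddings(128)(t)
    assert emb.shape == (4, 128)
    return emb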


class GaussianFourierProjection(nn.Module):
    """Gaussian random features for encoding time steps."""

    def __init__(self, embed_dim, scale=30.):
        super().__init__()
        # Randomly sample weights during initialization. These weights are fixed
        # during optimization and are not trainable.
        self.W = nn.Parameter(torch.randn(embed_dim // 2) * scale, requires_grad=False)

    def forward(self, x):
        x_proj = x[:, None] * self.W[None, :] * 2 * np.pi
        return torch.cat([torch.sin(x_proj), torch.cos(x_proj)], dim=-1)


class Dense(nn.Module):
    """A fully connected layer that reshapes outputs to feature maps."""

    def __init__(self, input_dim, output_dim):
        super().__init__()
        self.dense = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.dense(x)


class DynamicHead(nn.Module):

    def __init__(self,
                num_classes,
                d_model,
                pooler_resolution,
                strides,
                in_channels,
                dim_feedforward = 2048,
                nhead = 8,
                dropout = 0.0,
                activation = "relu",
                num_heads = 6,
                return_intermediate=True,
                use_focal=False,
                use_fed_loss=False,
                prior_prob=0.01
                ):
        super().__init__()

        # Build RoI.
        box_pooler = self._init_box_pooler(pooler_resolution,strides,in_channels)
        self.box_pooler = box_pooler
        
        # Build heads.
        rcnn_head = RCNNHead(d_model, num_classes,pooler_resolution, dim_feedforward, nhead, dropout, activation,use_focal=use_focal,use_fed_loss=use_fed_loss)
        self.head_series = _get_clones(rcnn_head, num_heads)
        self.num_heads = num_heads
        self.return_intermediate = return_intermediate

        # Gaussian random feature embedding layer for time
        self.d_model = d_model
        time_dim = d_model * 4
        self.time_mlp = nn.Sequential(
            SinusoidalPositionEmbeddings(d_model),
            nn.Linear(d_model, time_dim),
            nn.GELU(),
            nn.Linear(time_dim, time_dim),
        )

        # Init parameters.
        self.use_focal = use_focal
        self.use_fed_loss = use_fed_loss
        self.num_classes = num_classes
        if self.use_focal or self.use_fed_loss:
            self.bias_value = -math.log((1 - prior_prob) / prior_prob)
        self._reset_parameters()

    def _reset_parameters(self):
        # init all parameters.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

            # initialize the bias for focal loss and fed loss.
            if self.use_focal or self.use_fed_loss:
                if p.shape[-1] == self.num_classes or p.shape[-1] == self.num_classes + 1:
                    nn.init.constant_(p, self.bias_value)

    @staticmethod
    def _init_box_pooler(pooler_resolution,strides,in_channels):

        pooler_scales = [1/s for s in strides]
        sampling_ratio = 2
        pooler_type = "ROIAlignV2"

        # If StandardROIHeads is applied on multiple feature maps (as in FPN),
        # then we share the same predictors and therefore the channel counts must be the same
        # Check all channel counts are equal
        assert len(set(in_channels)) == 1, in_channels

        box_pooler = ROIPooler(
            output_size=pooler_resolution,
            scales=pooler_scales,
            sampling_ratio=sampling_ratio,
            pooler_type=pooler_type,
        )
        return box_pooler

    def forward(self,features,init_bboxes,t,lost_features=None,fix_ref_boxes=False):
        # assert t shape (batch_size)
        time = self.time_mlp(t)

        inter_class_logits = []
        inter_pred_bboxes = []
        inter_association_scores=[]

        bboxes = init_bboxes
        proposal_features = None
        
        for head_idx, rcnn_head in enumerate(self.head_series):
            class_logits, pred_bboxes, proposal_features ,association_score_logits= rcnn_head(features, bboxes, proposal_features,self.box_pooler,time,lost_features,fix_ref_boxes)
            if self.return_intermediate:
                inter_class_logits.append(torch.cat(class_logits,dim=0))
                inter_pred_bboxes.append(torch.cat(pred_bboxes,dim=0))
                inter_association_scores.append(torch.sigmoid(association_score_logits))
            bboxes = tuple(pred_bbox.detach() for pred_bbox in pred_bboxes)  # stop gradients between head stages

        if self.return_intermediate:
            return torch.stack(inter_class_logits), torch.stack(inter_pred_bboxes),torch.stack(inter_association_scores)

        return torch.cat(class_logits,dim=0)[None],torch.cat(pred_bboxes,dim=0)[None],torch.sigmoid(association_score_logits)[None]


class RCNNHead(nn.Module):

    def __init__(self,d_model, num_classes, pooler_resolution,dim_feedforward=2048, nhead=8, dropout=0.1, activation="relu",
                 scale_clamp: float = _DEFAULT_SCALE_CLAMP, bbox_weights=(2.0, 2.0, 1.0, 1.0),use_focal=False,use_fed_loss=False):
        super().__init__()

        self.d_model = d_model

        # dynamic.
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout,batch_first=True)
        # self.self_attn = FlashSelfAttention(d_model, nhead, attn_drop=dropout)
        # self.self_attn = WindowAttention(d_model,(8,8),nhead,attn_drop=dropout)
        # self.cross_attn = nn.MultiheadAttention(d_model,nhead,dropout=dropout)
        # self.stf=STF(dim=d_model)
        self.stf=SFT(d_model,pooler_resolution=pooler_resolution)

 
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        # self.norm4 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)
        # self.dropout4 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)

        # block time mlp
        self.block_time_mlp = nn.Sequential(nn.SiLU(), nn.Linear(d_model * 4, d_model * 2))

        # cls.
        num_cls = 1
        cls_module = list()
        for _ in range(num_cls):
            cls_module.append(nn.Linear(d_model, d_model,False))
            cls_module.append(nn.LayerNorm(d_model))
            cls_module.append(nn.ReLU(inplace=True))
        self.cls_module = nn.ModuleList(cls_module)

        # association score.
        num_score = 1
        score_module = list()
        for _ in range(num_score):
            score_module.append(nn.Linear(2*d_model, d_model,False))
            score_module.append(nn.LayerNorm(d_model))
            score_module.append(nn.ReLU(inplace=True))
        self.score_module = nn.ModuleList(score_module)


        # reg.
        num_reg = 3
        reg_module = list()
        for _ in range(num_reg):
            reg_module.append(nn.Linear(d_model, d_model,True))
            reg_module.append(nn.LayerNorm(d_model))
            reg_module.append(nn.ReLU(inplace=True))
        self.reg_module = nn.ModuleList(reg_module)
        
        # pred.
        self.use_focal = use_focal
        self.use_fed_loss = use_fed_loss
        if self.use_focal or self.use_fed_loss:
            self.class_logits = nn.Linear(d_model, num_classes)
        else:
            self.class_logits = nn.Linear(d_model, num_classes + 1)
        self.score_logits=nn.Linear(d_model,1)
        self.bboxes_delta = nn.Linear(d_model, 4)
        self.scale_clamp = scale_clamp
        self.bbox_weights = bbox_weights
        nn.init.constant_(self.class_logits.bias,-math.log((1 - 1e-2) / 1e-2))
        nn.init.constant_(self.bboxes_delta.bias,-math.log((1 - 1e-2) / 1e-2))
        for sub_module in self.reg_module:
            if isinstance(sub_module,nn.Linear):
                nn.init.constant_(sub_module.bias,-math.log((1 - 1e-2) / 1e-2))

    def forward(self, features,bboxes,pro_features,pooler,time_emb,lost_features=None,fix_ref_boxes=False):
        """
        :param bboxes: tuple of (N, nr_boxes, 4) tensors for the previous / current frame
        :param pro_features: (2*N, nr_boxes, d_model) fused proposal features, or None at the first stage
        """
        
        if pro_features is not None:
            # pro_features_pre,pro_features_curr=pro_features
            pro_features_x=pro_features
        else:
            pro_features_x=None
        
        bboxes_pre,bboxes_cur=bboxes
        
        N, nr_boxes = bboxes_pre.shape[:2]
        # rnd_idx = torch.randperm(nr_boxes)
        # bboxes_pre=bboxes_pre[:,rnd_idx,:]
        # bboxes_cur=bboxes_cur[:,rnd_idx,:]
        # roi_feature.
        proposal_boxes_pre = list()
        proposal_boxes_curr = list()
        for b in range(N):
            proposal_boxes_pre.append(Boxes(bboxes_pre[b]))
            proposal_boxes_curr.append(Boxes(bboxes_cur[b]))

        roi_features_pre = pooler(features[0], proposal_boxes_pre)
        if lost_features is not None:
            # overwrite the tail RoI features with the cached features of lost tracks
            roi_features_pre[roi_features_pre.shape[0]-lost_features.shape[0]:]=lost_features
        roi_features_curr = pooler(features[1], proposal_boxes_curr)

        if pro_features_x is None:
            pro_features_pre = roi_features_pre.view(N, nr_boxes, self.d_model, -1).mean(-1)
            pro_features_curr=roi_features_curr.view(N, nr_boxes, self.d_model, -1).mean(-1)
            pro_features_x=torch.cat([pro_features_pre,pro_features_curr],dim=0)
        # else:
        #      pro_features_pre=pro_features_pre.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:]
        #      pro_features_curr=pro_features_curr.reshape(N, nr_boxes, self.d_model)[:,rnd_idx,:]
        roi_features_pre = roi_features_pre.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2)
        roi_features_curr = roi_features_curr.view(N,nr_boxes, self.d_model, -1).permute(0,1,3,2)

        roi_features_x=torch.cat([torch.cat([roi_features_pre,roi_features_curr],dim=-2).unsqueeze(2),
        torch.cat([roi_features_curr,roi_features_pre],dim=-2).unsqueeze(2)],dim=2)

        # self_att.
        pro_features_x = pro_features_x.view(2*N, nr_boxes, self.d_model)
        # pro_features_pre =pro_features_pre+ self.dropout1(self.self_attn(pro_features_pre, pro_features_pre, pro_features_pre,20,25))
        pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x, pro_features_x, value=pro_features_x)[0])
        # pro_features_x =pro_features_x+ self.dropout1(self.self_attn(pro_features_x))
        pro_features_x = self.norm1(pro_features_x)

        # pro_features_curr = pro_features_curr.view(N, nr_boxes, self.d_model).permute(1, 0, 2)
        # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,value=pro_features_curr)[0])
        # # pro_features_curr = pro_features_curr+ self.dropout1(self.self_attn(pro_features_curr, pro_features_curr,pro_features_curr,20,25))
        # pro_features_curr = self.norm1(pro_features_curr)

        # cross_interact
        # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)
        # pro_features_pre =pro_features_pre+self.dropout2(self.cross_interact(pro_features_pre, roi_features_curr))
        # pro_features_pre = self.norm2(pro_features_pre)

        # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)
        # pro_features_curr =pro_features_curr+self.dropout2(self.cross_interact(pro_features_curr, roi_features_pre))
        # pro_features_curr = self.norm2(pro_features_curr)

        pro_features_x=torch.cat([x.unsqueeze(2) for x in pro_features_x.split(N,dim=0)],dim=-2)

        pro_features_x=pro_features_x+self.dropout2(self.stf(roi_features_x,pro_features_x))
        pro_features_x = self.norm2(pro_features_x)

        # roi_features_x=torch.cat([roi_features_curr.unsqueeze(2),roi_features_pre.unsqueeze(2)],dim=-2)
        # pro_features_x=pro_features_x+self.dropout4(self.stf2(roi_features_x,pro_features_x))
        # pro_features_x = self.norm4(pro_features_x)

        pro_features_x=torch.cat([x.squeeze(2) for x in pro_features_x.split(1,dim=-2)],dim=0).reshape(2*N*nr_boxes,-1)


        # inst_interact.
        # pro_features_pre = pro_features_pre.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)
        # pro_features_pre =pro_features_pre+self.dropout3(self.inst_interact(pro_features_pre, roi_features_pre))
        # obj_features_pre = self.norm3(pro_features_pre)

        # # pro_features_curr = pro_features_curr.view(nr_boxes, N, self.d_model).permute(1, 0, 2).reshape(1, N * nr_boxes, self.d_model)
        # pro_features_curr =pro_features_curr+self.dropout3(self.inst_interact(pro_features_curr, roi_features_curr))
        # obj_features_curr = self.norm3(pro_features_curr)

        # obj_feature.
        obj_features_tmp =self.linear2(self.dropout(self.activation(self.linear1(pro_features_x))))
        obj_features=pro_features_x+self.dropout3(obj_features_tmp)
        obj_features= self.norm3(obj_features)

        # obj_features_curr_tmp =self.linear2(self.dropout(self.activation(self.linear1(obj_features_curr))))
        # obj_features_curr=obj_features_curr+self.dropout4(obj_features_curr_tmp)
        # obj_features_curr = self.norm4(obj_features_curr)
        
        # fc_feature_pre = obj_features_pre.transpose(0, 1).reshape(N * nr_boxes, -1)
        # fc_feature_curr = obj_features_curr.transpose(0, 1).reshape(N * nr_boxes, -1)

        # all_features=[fc_feature_pre,fc_feature_curr]

        # all_features=[]
        # for fc_feature,fc_time_emb in zip([fc_feature_pre,fc_feature_curr],time_emb.split(N,dim=0)):
        scale_shift = self.block_time_mlp(time_emb)
        scale_shift = torch.repeat_interleave(scale_shift, nr_boxes, dim=0)
        scale, shift = scale_shift.chunk(2, dim=1)
        fc_feature = obj_features * (scale + 1) + shift
        # all_features.append(fc_feature)
        

        cls_feature= fc_feature.clone()
        reg_feature= fc_feature.clone()
        # concatenate the pre/cur halves channel-wise so the association score sees both frames
        score_feature = torch.cat(fc_feature.clone().split(N*nr_boxes, dim=0), dim=-1)

        for cls_layer in self.cls_module:
            cls_feature= cls_layer(cls_feature)

        for score_layer in self.score_module:
            score_feature=score_layer(score_feature)
    
        for reg_layer in self.reg_module:
            reg_feature= reg_layer(reg_feature)     
        
        class_logits = self.class_logits(cls_feature)
        bboxes_deltas= self.bboxes_delta(reg_feature)

        class_logits_pre,class_logits_curr=class_logits.split(N*nr_boxes,dim=0)
        bboxes_deltas_pre,bboxes_deltas_curr=bboxes_deltas.split(N*nr_boxes,dim=0)

        association_score=self.score_logits(score_feature)

        pred_bboxes_pre = self.apply_deltas(bboxes_deltas_pre, bboxes_pre.view(-1, 4))
        if fix_ref_boxes:
            assert not self.training,"fix reference bboxes only for inference mode"
            pred_bboxes_pre[:nr_boxes]=bboxes_pre[0,:nr_boxes]
        pred_bboxes_curr = self.apply_deltas(bboxes_deltas_curr, bboxes_cur.view(-1, 4))
            
        return (class_logits_pre.view(N, nr_boxes, -1), class_logits_curr.view(N, nr_boxes, -1)), \
               (pred_bboxes_pre.view(N, nr_boxes, -1), pred_bboxes_curr.view(N, nr_boxes, -1)), \
               obj_features, association_score.view(N, nr_boxes, -1)

    def apply_deltas(self, deltas, boxes):
        """
        Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.

        Args:
            deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.
                deltas[i] represents k potentially different class-specific
                box transformations for the single box boxes[i].
            boxes (Tensor): boxes to transform, of shape (N, 4)
        """
        boxes = boxes.to(deltas.dtype)

        widths = boxes[:, 2] - boxes[:, 0]
        heights = boxes[:, 3] - boxes[:, 1]
        ctr_x = boxes[:, 0] + 0.5 * widths
        ctr_y = boxes[:, 1] + 0.5 * heights

        wx, wy, ww, wh = self.bbox_weights
        dx = deltas[:, 0::4] / wx
        dy = deltas[:, 1::4] / wy
        dw = deltas[:, 2::4] / ww
        dh = deltas[:, 3::4] / wh

        # Prevent sending too large values into torch.exp()
        dw = torch.clamp(dw, max=self.scale_clamp)
        dh = torch.clamp(dh, max=self.scale_clamp)

        pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]
        pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]
        pred_w = torch.exp(dw) * widths[:, None]
        pred_h = torch.exp(dh) * heights[:, None]

        pred_boxes = torch.zeros_like(deltas)
        pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w  # x1
        pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h  # y1
        pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w  # x2
        pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h  # y2

        return pred_boxes
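

def _demo_apply_deltas_identity():
    # Illustrative standalone check (hypothetical helper) of the
    # (dx, dy, dw, dh) parameterization in RCNNHead.apply_deltas: zero deltas
    # reproduce the input box exactly (centers move by dx*w, dy*h; sizes scale
    # by exp(dw), exp(dh)).
    import torch
    box = torch.tensor([10., 20., 50., 100.])  # x1, y1, x2, y2
    w, h = box[2] - box[0], box[3] - box[1]
    cx, cy = box[0] + 0.5 * w, box[1] + 0.5 * h
    dx = dy = dw = dh = torch.tensor(0.)
    pred_cx, pred_cy = dx * w + cx, dy * h + cy
    pred_w, pred_h = torch.exp(dw) * w, torch.exp(dh) * h
    pred = torch.stack([pred_cx - 0.5 * pred_w, pred_cy - 0.5 * pred_h,
                        pred_cx + 0.5 * pred_w, pred_cy + 0.5 * pred_h])
    assert torch.allclose(pred, box)
    return pred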


class SFT(nn.Module):

    def __init__(self, hidden_dim, pooler_resolution,dim_dynamic=2*64,num_dynamic=2):
        super().__init__()

        self.hidden_dim = hidden_dim
        self.dim_dynamic = dim_dynamic
        self.num_dynamic = num_dynamic
        self.pooler_resolution= pooler_resolution
        self.num_params = self.hidden_dim * self.dim_dynamic
        self.dynamic_layer = nn.Linear(self.hidden_dim, self.num_dynamic * self.num_params)

        self.norm1 = nn.LayerNorm(self.dim_dynamic)
        self.norm2 = nn.LayerNorm(self.hidden_dim)

        self.activation = nn.ReLU(inplace=True)

        num_output = 2 * self.hidden_dim * self.pooler_resolution ** 2  # flattened size fed to out_layer
        self.num_output = 2 * self.pooler_resolution ** 2  # RoI tokens per (box, frame) slot
        self.out_layer = nn.Linear(num_output, self.hidden_dim)
        self.norm3 = nn.LayerNorm(self.hidden_dim)

    def forward(self,roi_features,pro_features):
        '''
        pro_features: (N, nr_boxes, 2, d_model)
        roi_features: (N, nr_boxes, 2, 2 * pooler_resolution**2, d_model)
        '''
        N=pro_features.shape[0]
        # features=torch.cat([x.unsqueeze(2) for x in roi_features.split(self.num_output,dim=-2)],dim=2).reshape(-1,self.num_output,self.hidden_dim)
        features = roi_features.reshape(-1,self.num_output,self.hidden_dim)
        parameters = self.dynamic_layer(pro_features)

        param1 = parameters[:, :, :,:self.num_params].reshape(-1, self.hidden_dim, self.dim_dynamic)
        param2 = parameters[:, :, :,self.num_params:].reshape(-1, self.dim_dynamic, self.hidden_dim)

        features = torch.bmm(features, param1)
        features = self.norm1(features)
        features = self.activation(features) 

        features = torch.bmm(features, param2)
        features = self.norm2(features)
        features = self.activation(features)

        features = features.flatten(1)
        features = self.out_layer(features)
        features = self.norm3(features)
        features = self.activation(features)

        return features.reshape(N,-1,2,self.hidden_dim)
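

def _demo_sft_shapes():
    # Illustrative shape walk-through (hypothetical helper) for SFT above with
    # hidden_dim=64 and a 7x7 pooler: per (box, frame) slot, 2*49 RoI tokens
    # are mixed by two dynamically generated bmm layers, then flattened and
    # projected back to hidden_dim.
    import torch
    sft = SFT(hidden_dim=64, pooler_resolution=7)
    pro = torch.randn(2, 5, 2, 64)          # (N, nr_boxes, 2 frames, d_model)
    roi = torch.randn(2, 5, 2, 2 * 49, 64)  # (N, nr_boxes, 2, 2*res**2, d_model)
    out = sft(roi, pro)
    assert out.shape == (2, 5, 2, 64)
    return out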
    

class PerceiverAttention(nn.Module):
    def __init__(self, *, dim, dim_head=64, heads=8):
        super().__init__()
        self.scale = dim_head**-0.5
        self.heads = heads
        inner_dim = dim_head * heads

        self.norm_media = nn.LayerNorm(dim)
        self.norm_latents = nn.LayerNorm(dim)

        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim, bias=False)

    def forward(self, x, latents):
        """
        Args:
            x (torch.Tensor): image features
                shape (b, T, n1, D)
            latent (torch.Tensor): latent features
                shape (b, T, n2, D)
        """
        x = self.norm_media(x)
        latents = self.norm_latents(latents)

        h = self.heads

        q = self.to_q(latents)
        kv_input = torch.cat((x, latents), dim=-2)
        k, v = self.to_kv(kv_input).chunk(2, dim=-1)
        q, k, v = rearrange_many((q, k, v), "b t n (h d) -> b h t n d", h=h)
        q = q * self.scale

        # attention
        sim = einsum("... i d, ... j d  -> ... i j", q, k)
        sim = sim - sim.amax(dim=-1, keepdim=True).detach()
        attn = sim.softmax(dim=-1)

        out = einsum("... i j, ... j d -> ... i d", attn, v)
        out = rearrange(out, "b h t n d -> b t n (h d)", h=h)
        return self.to_out(out)
    

def FeedForward(dim, mult=4):
    inner_dim = int(dim * mult)
    return nn.Sequential(
        nn.LayerNorm(dim),
        nn.Linear(dim, inner_dim, bias=False),
        nn.GELU(),
        nn.Linear(inner_dim, dim, bias=False),
    )


# class STF(nn.Module):
#     def __init__(
#         self,
#         *,
#         dim,
#         depth=2,
#         dim_head=64,
#         heads=8,
#         ff_mult=4,
#     ):
#         super().__init__()
#         # self.latents = nn.Parameter(torch.randn(num_latents, dim))

#         self.layers = nn.ModuleList([])
#         for _ in range(depth):
#             self.layers.append(
#                 nn.ModuleList(
#                     [
#                         PerceiverAttention(dim=dim, dim_head=dim_head, heads=heads),
#                         FeedForward(dim=dim, mult=ff_mult),
#                     ]
#                 )
#             )

#         self.norm = nn.LayerNorm(dim)

#     def forward(self,roi_features,pro_features):
#         '''
#         pro_features: ( N,nr_boxes,2,self.d_model)
#         roi_features: ( N,nr_boxes,2,49*2,self.d_model)
#         '''
#         b,n,x,dim=pro_features.shape

#         # blocks
#         latents=pro_features.reshape(b,n*x,1,-1)
#         roi_features=roi_features.reshape(b,n*x,-1,dim)
#         for attn, ff in self.layers:
#             latents = attn(roi_features, latents) + latents
#             latents = ff(latents) + latents

#         return self.norm(latents).reshape(b,n,x,dim)
        

class WindowAttention(nn.Module):
    """ Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional):  If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5


        self.to_q = nn.Linear(dim, dim, bias=qkv_bias)
        self.to_k = nn.Linear(dim, dim, bias=qkv_bias)
        self.to_v = nn.Linear(dim, dim, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        # trunc_normal_(self.relative_position_bias_table, std=.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self,q,k,v,H,W):
        """ Forward function.
        Args:
            q, k, v: input features with shape of (B, H*W, C)
            H, W: spatial size of the unpartitioned feature map
        """
        B_, N, C = q.shape
        assert N==k.shape[1] and N==v.shape[1],"query,key and value must have equal length"
        pad_l = pad_t = 0
        pad_r = (self.window_size[1] - W % self.window_size[1]) % self.window_size[1]
        pad_b = (self.window_size[0] - H % self.window_size[0]) % self.window_size[0]
        def mode_charge(x):  # pad to a window multiple, then partition into windows
            x = x.reshape(B_, H, W, C)

            x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
            _, Hp, Wp, _ = x.shape

            x = window_partition(x, self.window_size[0])  # nW*B, window_size, window_size, C
            x = x.view(-1, self.window_size[1] * self.window_size[0], C)  # nW*B, window_size*window_size, C
            return x,Hp,Wp
        (q,Hp,Wp),(k,_,_),(v,_,_)=mode_charge(q),mode_charge(k),mode_charge(v)
        B_w = q.shape[0]
        N_w = q.shape[1]
        q= self.to_q(q).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]
        k= self.to_k(k).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]
        v= self.to_v(v).reshape(B_w, N_w, 1, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)[0]

        q = q * self.scale
        attn = (q @ k.transpose(-2, -1))

        attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_w, N_w, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        x = x.view(-1, self.window_size[1], self.window_size[0], C)
        x = window_reverse(x, self.window_size[0], Hp, Wp)  # B H' W' C
 
        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B_, H * W, C)

        return x
    

    
def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")

def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
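

def _demo_window_round_trip():
    # Illustrative consistency check (hypothetical helper): window_partition
    # followed by window_reverse reconstructs the input exactly when H and W
    # are divisible by the window size.
    import torch
    x = torch.randn(2, 8, 8, 16)                  # (B, H, W, C)
    windows = window_partition(x, window_size=4)  # -> (8, 4, 4, 16)
    assert torch.equal(window_reverse(windows, 4, 8, 8), x)
    return windows.shape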

# from flash_attn import flash_attn_qkvpacked_func, flash_attn_func
# class FlashSelfAttention(nn.Module):

#     def __init__(self, dim,num_heads, qkv_bias=True, qk_scale=None, attn_drop=0., proj_drop=0.):

#         super().__init__()
#         self.dim = dim
#         self.num_heads = num_heads
#         head_dim = dim // num_heads
#         self.scale = qk_scale or head_dim ** -0.5

#         # self.in_proj = nn.Linear(dim, 3*dim, bias=qkv_bias)
#         self.in_proj_weight = nn.Parameter(torch.empty((3 * dim,dim)))
#         if qkv_bias:
#             self.in_proj_bias = nn.Parameter(torch.empty(3 * dim))
#         else:
#             self.register_parameter('in_proj_bias', None)
#         self.attn_drop = nn.Dropout(attn_drop)
#         self.out_proj = nn.Linear(dim, dim)
#         self.proj_drop = nn.Dropout(proj_drop)


#     def forward(self,x):
#         """
#         x: B,N,C
#         """
#         B_, N, C = x.shape
#         qkv=F.linear(x, self.in_proj_weight , self.in_proj_bias).reshape(B_,N,3,self.num_heads,-1)
#         x=flash_attn_qkvpacked_func(qkv,self.attn_drop.p if self.training else 0.0,softmax_scale=self.scale).reshape(B_,N,-1)
#         x=self.out_proj(x)
#         x=self.proj_drop(x)
#         return x
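

# A minimal availability probe for the optional flash-attn fast path sketched
# in the commented-out FlashSelfAttention above (a sketch; flash_attn is a
# third-party package that this repo does not otherwise require):
def _flash_attn_available():
    try:
        from flash_attn import flash_attn_qkvpacked_func  # noqa: F401
        return True
    except ImportError:
        return False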


================================================
FILE: diffusion/models/diffusionnet.py
================================================
import math
import random
from typing import List
from collections import namedtuple

import torch
import torch.nn.functional as F
from torch import nn
from yolox.models.yolo_pafpn import YOLOPAFPN
from .diffusion_head import DiffusionHead
from yolox.models.network_blocks import BaseConv

class DiffusionNet(nn.Module):
    """
    DiffusionNet: couples a YOLOX PAFPN backbone with a diffusion head;
    see the I/O-contract sketch after this class.
    """

    def __init__(self, backbone=None, head=None, act="silu"):
        super().__init__()
        self.backbone = backbone
        self.head = head
        # 1x1 convs that project each FPN level to the head's hidden width
        self.projs = nn.ModuleList()
        in_channels = backbone.in_channels
        for i in range(len(in_channels)):
            self.projs.append(
                BaseConv(
                    in_channels=int(in_channels[i] * head.width),
                    out_channels=int(head.hidden_dim),
                    ksize=1,
                    stride=1,
                    act=act,
                ))

    def forward(self, x, targets=(None, None), random_flip=False, input_size=None):
        # fpn output content features of [dark3, dark4, dark5]
        # x format: (pre_imgs, cur_imgs), each of shape (B, C, H, W)
        # targets format: (pre_targets, cur_targets), each (B, N, 5) as class cx cy w h
        pre_imgs, cur_imgs = x
        pre_targets, cur_targets = targets
        meta_info = (pre_imgs.shape, pre_imgs.device, pre_imgs.dtype)
        bs, _, _, _ = meta_info[0]
        if cur_imgs is None:
            x_input = pre_imgs
        else:
            x_input = torch.cat([pre_imgs, cur_imgs], dim=0)

        fpn_outs = self.backbone(x_input)
        flip_mode = False
        if random_flip and random.random() > 0.5:  # flip with probability 0.5
            flip_mode = True
        pre_features, cur_features = [], []

        for proj, x_out in zip(self.projs, fpn_outs):
            l_feat = proj(x_out)
            if cur_imgs is None:
                # single-frame mode: reuse the same features for both frames,
                # optionally flipping the "current" copy horizontally
                pre_features.append(l_feat)
                if flip_mode:
                    cur_features.append(torch.flip(l_feat, dims=[3]))
                else:
                    cur_features.append(l_feat.clone())
            else:
                pre_l_feat, cur_l_feat = l_feat.split(bs, dim=0)
                pre_features.append(pre_l_feat)
                cur_features.append(cur_l_feat)

        features = (pre_features, cur_features)

        if self.training:
            assert pre_targets is not None
            if cur_targets is None:
                cur_targets = pre_targets.clone()
                if flip_mode:
                    # mirror cx so the targets match the flipped "current" features
                    nlabels = (cur_targets.sum(-1) > 0).sum(-1)
                    for idx, nlabel in enumerate(nlabels):
                        cur_targets[idx, :nlabel, 1] = input_size[1] - cur_targets[idx, :nlabel, 1]
            loss_dict = self.head(
                features, meta_info, targets=torch.cat([pre_targets, cur_targets], dim=0))
            if 'total_loss' not in loss_dict:
                loss_dict['total_loss'] = sum(loss_dict.values())
            outputs = loss_dict
            return outputs
        else:
            outputs = self.head(features, meta_info, targets=pre_targets)

        return outputs
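

# A minimal I/O-contract sketch for DiffusionNet (illustrative only).
# _StubBackbone and _StubHead are hypothetical stand-ins for YOLOPAFPN and
# DiffusionHead, providing only the attributes DiffusionNet actually uses.
class _StubBackbone(nn.Module):
    in_channels = [256, 512, 1024]

    def forward(self, x):
        b, _, h, w = x.shape
        # three FPN levels at strides 8/16/32
        return [torch.zeros(b, c, h // s, w // s)
                for c, s in zip(self.in_channels, (8, 16, 32))]


class _StubHead(nn.Module):
    width = 1.0        # channel multiplier read by DiffusionNet.__init__
    hidden_dim = 256   # projection width read by DiffusionNet.__init__

    def forward(self, features, meta_info, targets=None):
        pre_features, cur_features = features
        return [tuple(f.shape) for f in pre_features]


if __name__ == "__main__":
    net = DiffusionNet(_StubBackbone(), _StubHead()).eval()
    pre_imgs = torch.zeros(2, 3, 256, 256)
    with torch.no_grad():
        # single-frame inference path: cur_imgs is None
        print(net((pre_imgs, None)))
        # -> [(2, 256, 32, 32), (2, 256, 16, 16), (2, 256, 8, 8)]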




================================================
FILE: exps/default/nano.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os
import torch.nn as nn

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.33
        self.width = 0.25
        self.scale = (0.5, 1.5)
        self.random_size = (10, 20)
        self.test_size = (416, 416)
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.enable_mixup = False

    def get_model(self, sublinear=False):

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03
        if "model" not in self.__dict__:
            from yolox.models import YOLOX, YOLOPAFPN, YOLOXHead
            in_channels = [256, 512, 1024]
            # The nano model uses depthwise convolutions, which is the main difference.
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels, depthwise=True)
            head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels, depthwise=True)
            self.model = YOLOX(backbone, head)

        self.model.apply(init_yolo)
        self.model.head.initialize_biases(1e-2)
        return self.model
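

# Usage sketch (illustrative): build the nano model and report its size.
# Assumes the yolox package in this repo is importable; num_classes falls
# back to the base Exp default.
if __name__ == "__main__":
    exp = Exp()
    model = exp.get_model()
    n_params = sum(p.numel() for p in model.parameters())
    print(f"{exp.exp_name}: {n_params / 1e6:.2f}M parameters")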


================================================
FILE: exps/default/yolov3.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os
import torch
import torch.nn as nn

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 1.0
        self.width = 1.0
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

    def get_model(self, sublinear=False):
        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03
        if "model" not in self.__dict__:
            from yolox.models import YOLOX, YOLOFPN, YOLOXHead
            backbone = YOLOFPN()
            head = YOLOXHead(self.num_classes, self.width, in_channels=[128, 256, 512], act="lrelu")
            self.model = YOLOX(backbone, head)
        self.model.apply(init_yolo)
        self.model.head.initialize_biases(1e-2)

        return self.model

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        # NOTE: these imports follow the upstream YOLOX package layout and may
        # need to be adapted here, where the data utilities live under yolox.data.
        from data.datasets.cocodataset import COCODataset
        from data.datasets.mosaicdetection import MosaicDetection
        from data.datasets.data_augment import TrainTransform
        from data.datasets.dataloading import YoloBatchSampler, DataLoader, InfiniteSampler
        import torch.distributed as dist

        dataset = COCODataset(
                data_dir='data/COCO/',
                json_file=self.train_ann,
                img_size=self.input_size,
                preproc=TrainTransform(
                    rgb_means=(0.485, 0.456, 0.406),
                    std=(0.229, 0.224, 0.225),
                    max_labels=50
                ),
        )

        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=120
            ),
            degrees=self.degrees,
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = InfiniteSampler(len(self.dataset), seed=self.seed if self.seed else 0)
        else:
            sampler = torch.utils.data.RandomSampler(self.dataset)

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader


================================================
FILE: exps/default/yolox_l.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 1.0
        self.width = 1.0
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]


================================================
FILE: exps/default/yolox_m.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.67
        self.width = 0.75
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]


================================================
FILE: exps/default/yolox_s.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.33
        self.width = 0.50
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]


================================================
FILE: exps/default/yolox_tiny.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 0.33
        self.width = 0.375
        self.scale = (0.5, 1.5)
        self.random_size = (10, 20)
        self.test_size = (416, 416)
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.enable_mixup = False


================================================
FILE: exps/default/yolox_x.py
================================================
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.

import os

from yolox.exp import Exp as MyExp


class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]


================================================
FILE: exps/example/mot/yolox_x_diffusion_det_dancetrack.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "train.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (18, 32)
        self.max_epoch = 20
        self.print_interval = 20
        self.eval_interval = 40
        self.no_aug_epochs = 5
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="detection"
        self.enable_mixup = True
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        # fixed AdamW hyper-parameters; this overrides the basic_lr_per_img scheme
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer
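

# How tools/train.py typically consumes an Exp like this one (a sketch,
# assuming yolox.exp exposes get_exp(exp_file, exp_name) as in upstream
# YOLOX; the path below is this file's repo-relative location):
if __name__ == "__main__":
    from yolox.exp import get_exp

    exp = get_exp("exps/example/mot/yolox_x_diffusion_det_dancetrack.py", None)
    model = exp.get_model()
    optimizer = exp.get_optimizer(batch_size=8)
    print(type(model).__name__, type(optimizer).__name__)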


================================================
FILE: exps/example/mot/yolox_x_diffusion_det_mot17.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "val_half.json"
        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        self.random_size = (18, 32)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="detection"
        self.enable_mixup = True
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mix_det"),
            json_file=self.train_ann,
            name='',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train_half.json"
        self.val_ann = "val_half.json"
        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        self.random_size = (18, 32)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="detection"
        self.enable_mixup = True
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mix_mot_ch"),
            json_file=self.train_ann,
            name='',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_det_mot20.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "val_half.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (20, 36)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="detection"
        self.enable_mixup = True
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mix_mot20_ch"),
            json_file=self.train_ann,
            name='',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = MosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "MOT20"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "train.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (18, 32)
        self.max_epoch = 20
        self.print_interval = 20 
        self.eval_interval = 40
        self.no_aug_epochs = 5
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="tracking"
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "test.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (18, 32)
        self.max_epoch = 20
        self.print_interval = 20 
        self.eval_interval = 40
        self.no_aug_epochs = 5
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="tracking"
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "dancetrack"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='test',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_mot17.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "val_half.json"
        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        self.random_size = (18, 32)
        self.max_epoch = 30
        self.print_interval = 20 
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="tracking"
        self.seed=8823
        self.conf_thresh=0.4
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            for param in backbone.parameters():
                param.requires_grad = False  # freeze the pre-trained backbone
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train_half.json"
        self.val_ann = "val_half.json"
        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        self.random_size = (18, 32)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task="tracking"
        self.enable_mixup = True
        self.seed=8823
        self.conf_thresh=0.25
        self.det_thresh=0.7
        self.nms_thresh2d=0.75
        self.nms_thresh3d=0.7
        self.interval=5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            # freeze the YOLOX backbone so that only the diffusion head is trained
            for value in backbone.parameters():
                value.requires_grad = False
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        # fine-tune with a fixed AdamW learning rate; batch_size is unused here
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer
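
A minimal usage sketch for one of these Exp configs (a sketch, not the repo's documented entry point, which is tools/train.py; it assumes get_exp is re-exported from yolox.exp as in upstream YOLOX — otherwise import it from yolox.exp.build — and that the datasets are in place):

    from yolox.exp import get_exp

    # illustrative path; any of the exp files under exps/example/mot works the same way
    exp = get_exp("exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py", None)
    model = exp.get_model()                      # frozen YOLOPAFPN backbone + DiffusionHead
    optimizer = exp.get_optimizer(batch_size=8)  # fixed-LR AdamW (see get_optimizer above)
    loader = exp.get_data_loader(batch_size=8, is_distributed=False)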


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_mot17_baseline.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "test.json"
        self.input_size = (800, 1440)
        self.test_size = (800, 1440)
        self.random_size = (18, 32)
        self.max_epoch = 30
        self.print_interval = 20 
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task = "tracking"
        self.seed = 8823
        self.conf_thresh = 0.4
        self.det_thresh = 0.7
        self.nms_thresh2d = 0.75
        self.nms_thresh3d = 0.7
        self.interval = 5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "mot"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='test',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1000, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            # freeze the YOLOX backbone so that only the diffusion head is trained
            for value in backbone.parameters():
                value.requires_grad = False
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        # fine-tune with a fixed AdamW learning rate; batch_size is unused here
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_mot20.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "val_half.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (20, 36)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task = "tracking"
        self.seed = 8823
        self.conf_thresh = 0.4
        self.det_thresh = 0.7
        self.nms_thresh2d = 0.75
        self.nms_thresh3d = 0.7
        self.interval = 5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "MOT20"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "MOT20"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            # freeze the YOLOX backbone so that only the diffusion head is trained
            for value in backbone.parameters():
                value.requires_grad = False
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        # fine-tune with a fixed AdamW learning rate; batch_size is unused here
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer


================================================
FILE: exps/example/mot/yolox_x_diffusion_track_mot20_baseline.py
================================================
# encoding: utf-8
import os
import random
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.optim import AdamW
from yolox.exp import Exp as MyExp
from yolox.data import get_yolox_datadir

class Exp(MyExp):
    def __init__(self):
        super(Exp, self).__init__()
        self.num_classes = 1
        self.depth = 1.33
        self.width = 1.25
        self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
        self.train_ann = "train.json"
        self.val_ann = "val_half.json"
        self.input_size = (896, 1600)
        self.test_size = (896, 1600)
        self.random_size = (20, 36)
        self.max_epoch = 30
        self.print_interval = 20
        self.eval_interval = 5
        self.no_aug_epochs = 10
        self.basic_lr_per_img = 0.001 / 64.0
        self.warmup_epochs = 1
        self.task = "tracking"
        self.seed = 8823
        self.conf_thresh = 0.4
        self.det_thresh = 0.7
        self.nms_thresh2d = 0.75
        self.nms_thresh3d = 0.7
        self.interval = 5

    def get_data_loader(self, batch_size, is_distributed, no_aug=False):
        from yolox.data import (
            MOTDataset,
            TrainTransform,
            YoloBatchSampler,
            DataLoader,
            InfiniteSampler,
            MosaicDetection,
            DiffusionMosaicDetection,
            DiffusionTrainTransform
        )

        dataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "MOT20"),
            json_file=self.train_ann,
            name='train',
            img_size=self.input_size,
            preproc=TrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=500,
            ),
        )

        dataset = DiffusionMosaicDetection(
            dataset,
            mosaic=not no_aug,
            img_size=self.input_size,
            preproc=DiffusionTrainTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            ),
            degrees=self.degrees, 
            translate=self.translate,
            scale=self.scale,
            shear=self.shear,
            perspective=self.perspective,
            enable_mixup=self.enable_mixup,
        )

        self.dataset = dataset

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()

        sampler = InfiniteSampler(
            len(self.dataset), seed=self.seed if self.seed else 0
        )

        batch_sampler = YoloBatchSampler(
            sampler=sampler,
            batch_size=batch_size,
            drop_last=False,
            input_dimension=self.input_size,
            mosaic=not no_aug,
        )

        dataloader_kwargs = {"num_workers": self.data_num_workers, "pin_memory": True}
        dataloader_kwargs["batch_sampler"] = batch_sampler
        train_loader = DataLoader(self.dataset, **dataloader_kwargs)

        return train_loader

    def get_eval_loader(self, batch_size, is_distributed, testdev=False):
        from yolox.data import MOTDataset, DiffusionValTransform

        valdataset = MOTDataset(
            data_dir=os.path.join(get_yolox_datadir(), "MOT20"),
            json_file=self.val_ann,
            img_size=self.test_size,
            name='train',
            preproc=DiffusionValTransform(
                rgb_means=(0.485, 0.456, 0.406),
                std=(0.229, 0.224, 0.225),
                max_labels=1200, 
            )
        )

        if is_distributed:
            batch_size = batch_size // dist.get_world_size()
            sampler = torch.utils.data.distributed.DistributedSampler(
                valdataset, shuffle=False
            )
        else:
            sampler = torch.utils.data.SequentialSampler(valdataset)

        dataloader_kwargs = {
            "num_workers": self.data_num_workers,
            "pin_memory": True,
            "sampler": sampler,
        }
        dataloader_kwargs["batch_size"] = batch_size
        val_loader = torch.utils.data.DataLoader(valdataset, **dataloader_kwargs)

        return val_loader

    def get_evaluator(self, batch_size, is_distributed, testdev=False):
        from yolox.evaluators import COCOEvaluator

        val_loader = self.get_eval_loader(batch_size, is_distributed, testdev=testdev)
        evaluator = COCOEvaluator(
            dataloader=val_loader,
            img_size=self.test_size,
            confthre=self.conf_thresh,
            nmsthre3d=self.nms_thresh3d,
            detthre=self.det_thresh,
            nmsthre2d=self.nms_thresh2d,
            num_classes=self.num_classes,
            testdev=testdev,
        )
        return evaluator
    
    def get_model(self):
        from yolox.models import YOLOPAFPN, YOLOX, YOLOXHead
        from diffusion.models.diffusionnet import DiffusionNet, DiffusionHead

        def init_yolo(M):
            for m in M.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eps = 1e-3
                    m.momentum = 0.03

        if getattr(self, "model", None) is None:
            in_channels = [256, 512, 1024]
            backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
            # freeze the YOLOX backbone so that only the diffusion head is trained
            for value in backbone.parameters():
                value.requires_grad = False
            head = DiffusionHead(self.num_classes, self.width)
            self.model = DiffusionNet(backbone, head)

        self.model.apply(init_yolo)
        # self.model.head.initialize_biases(1e-2)
        return self.model

    def get_optimizer(self, batch_size):
        # fine-tune with a fixed AdamW learning rate; batch_size is unused here
        lr = 2.5e-05
        weight_decay = 0.0001
        self.optimizer = AdamW(self.model.parameters(), lr=lr, weight_decay=weight_decay)
        return self.optimizer
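
All four tracking configs above share this optimizer override, so the two learning-rate knobs are easy to conflate. A quick arithmetic note (a sketch; how basic_lr_per_img is consumed depends on the trainer's LR scheduler):

    # basic_lr_per_img scales with the total batch size, e.g. at batch 64:
    #   64 * (0.001 / 64.0) = 0.001      (the usual YOLOX SGD starting LR)
    # get_optimizer() above ignores it and instead fine-tunes with a fixed
    # AdamW LR of 2.5e-5, roughly 40x smaller, which matches training only
    # the diffusion head on top of a frozen backbone.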


================================================
FILE: requirements.txt
================================================
numpy
torch>=1.7
opencv_python
loguru
scikit-image
tqdm
torchvision>=0.10.0
Pillow
thop
ninja
tabulate
tensorboard
lap
motmetrics
filterpy
h5py


================================================
FILE: setup.py
================================================
#!/usr/bin/env python
# Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved

import re
import setuptools
import glob
from os import path
import torch
from torch.utils.cpp_extension import CppExtension

torch_ver = [int(x) for x in torch.__version__.split(".")[:2]]
assert torch_ver >= [1, 3], "Requires PyTorch >= 1.3"


def get_extensions():
    this_dir = path.dirname(path.abspath(__file__))
    extensions_dir = path.join(this_dir, "yolox", "layers", "csrc")

    main_source = path.join(extensions_dir, "vision.cpp")
    sources = glob.glob(path.join(extensions_dir, "**", "*.cpp"))

    sources = [main_source] + sources
    extension = CppExtension

    extra_compile_args = {"cxx": ["-O3"]}
    define_macros = []

    include_dirs = [extensions_dir]

    ext_modules = [
        extension(
            "yolox._C",
            sources,
            include_dirs=include_dirs,
            define_macros=define_macros,
            extra_compile_args=extra_compile_args,
        )
    ]

    return ext_modules


with open("yolox/__init__.py", "r") as f:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
        f.read(), re.MULTILINE
    ).group(1)


# with open("README.md", "r") as f:
#     long_description = f.read()

long_description="sss"
setuptools.setup(
    name="yolox",
    version=version,
    author="basedet team",
    python_requires=">=3.6",
    long_description=long_description,
    ext_modules=get_extensions(),
    classifiers=["Programming Language :: Python :: 3", "Operating System :: OS Independent"],
    cmdclass={"build_ext": torch.utils.cpp_extension.BuildExtension},
    packages=setuptools.find_namespace_packages(),
)
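
A small, hypothetical smoke test for the build above (assumes the package was installed, e.g. with python setup.py develop):

    import torch   # torch must be importable before the compiled extension loads
    import yolox

    print(yolox.__version__)  # the version string setup.py parses from yolox/__init__.py
    import yolox._C           # raises ImportError if the cocoeval C++ extension was not built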


================================================
FILE: tools/convert_bdd100k_to_coco.py
================================================
import cv2
import os
import json
import tqdm
import numpy as np

labels_path = 'datasets/bdd100k/labels'
img_path = 'datasets/bdd100k/images'
# mot_labels_path  = '/data/yourname/BDD100K-MOT/GT'

out_path = 'datasets/bdd100k/annotations/'

split = ['train']
categories = [
    {"id": 1, "name": "pedestrian"},
    {"id": 2, "name": "rider"},
    {"id": 3, "name": "car"},
    {"id": 4, "name": "truck"},
    {"id": 5, "name": "bus"},
    {"id": 6, "name": "train"},
    {"id": 7, "name": "motorcycle"},
    {"id": 8, "name": "bicycle"},
    # {"id": 9, "name": "traffic light"},
    # {"id": 10, "name": "traffic sign"},
]

# "traffic light":9, "traffic sign":10
cat = {"pedestrian":1, "rider":2, "car":3, "truck":4, "bus":5, "train":6, "motorcycle":7, "bicycle":8,}
# 1: pedestrian
# 2: rider
# 3: car
# 4: truck
# 5: bus
# 6: train
# 7: motorcycle
# 8: bicycle  
# 9: traffic light --- Don't need tracking
# 10: traffic sign  ---   Don't need tracking
# For MOT and MOTS, only the first 8 classes are used and evaluated

def read_tid_num_per_video(video_ann_dir):
    anns = np.loadtxt(video_ann_dir, dtype=np.float32, delimiter=',')
    max_tid = max(anns[:, 1])
    return int(max_tid)
    

for s in split:
    img_id = 1
    ann_id = 1
    video_cnt = 0
    tid_cnt = 0
    images, annotations, videos = [], [], []
    all_video = [d for d in os.listdir(os.path.join(labels_path, s)) if '.json' in d]
    # randomly keep one third of the videos
    need_index = np.random.choice(range(len(all_video)), len(all_video)//3, replace=False)
    video_labels_list = [all_video[i] for i in need_index]
    
    for v_label in tqdm.tqdm(video_labels_list):
        video_cnt += 1
        video = {'id': video_cnt, 'file_name':v_label[:-5]}
        videos.append(video)
        
        v_lab_path = os.path.join(labels_path, s, v_label)
        with open(v_lab_path, 'r') as f:
            annos = json.load(f)  # annotations for one video
        num_frames = len(annos)  # the number of frames in this video
        sign_cnt = 0
        for ann in annos:
            # ann: the annotation record of one frame; frames with empty
            # annotation lists are kept rather than skipped
            img_name = os.path.join(img_path, s, ann['videoName'], ann['name'])
            img = cv2.imread(img_name)
            h, w, _ = img.shape

            img_info = {
                'file_name': img_name,
                'width': w,
                'height': h,
                'id': img_id,
                # keep the dataset's own frame-index ordering so adjacent-frame
                # relations can be recovered downstream
                'frame_id': ann['frameIndex'] + 1,
                'prev_image_id': -1 if ann['frameIndex'] == 0 else img_id - 1,
                'next_image_id': -1 if ann['frameIndex'] == num_frames - 1 else img_id + 1,
                'video_id': video_cnt
            }  # images also receives entries for frames with empty annotations
            images.append(img_info)
            
            for j, lab in enumerate(ann['labels']):
                # lab: one instance annotation; if the frame has no labels
                # (ann['labels'] is empty) this loop body never runs
                if lab['category'] not in cat:  # skip unlisted categories such as 'other vehicle'
                    continue
                    
                track_id = lab['id']
                     
                if sign_cnt == 0 and j==0:
                    firstid = track_id
                    sign_cnt = 1      
                     
                tid_curr = int(track_id) - int(firstid) + 1
                tid_cnt += 1
                is_crowd = lab['attributes']['crowd']
                x1, y1, x2, y2 = lab['box2d']['x1'], lab['box2d']['y1'], lab['box2d']['x2'], lab['box2d']['y2']
                
                annotation = {
                    'image_id': img_id,
                    'conf': 1,
                    'bbox': [x1, y1, x2-x1, y2-y1],
                    'category_id': cat[lab['category']],
                    'id': ann_id,
                    'iscrowd':  1 if is_crowd else 0,
                    'track_id': tid_curr + tid_cnt,
                    'segmentation': [],
                    'area': (x2-x1)*(y2-y1),
                    'box_id':int(track_id)   
                }
                annotations.append(annotation)
                ann_id += 1
                    
            img_id += 1
            
        # tid_cnt += read_tid_num_per_video(os.path.join(mot_labels_path, s, v_label[:-5]+'.txt'))
            
    dataset_dict = {}
    dataset_dict["images"] = images
    dataset_dict["annotations"] = annotations
    dataset_dict["categories"] = categories
    dataset_dict["videos"] = videos
    
    json_str = json.dumps(dataset_dict)
    print(f'The number of annotated objects is {ann_id - 1}, the number of images is {img_id - 1}.')
    with open(out_path + f'{s}.json', 'w') as json_file:
        json_file.write(json_str)
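
A hypothetical sanity check on the JSON written above, using pycocotools (which the COCO evaluators already rely on); the path matches out_path:

    from pycocotools.coco import COCO

    coco = COCO('datasets/bdd100k/annotations/train.json')
    print(len(coco.getImgIds()), 'images,', len(coco.getAnnIds()), 'annotations')
    print(sorted(c['name'] for c in coco.loadCats(coco.getCatIds())))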

================================================
FILE: tools/convert_cityperson_to_coco.py
================================================
import os
import numpy as np
import json
from PIL import Image

DATA_PATH = 'datasets/Cityscapes/'
DATA_FILE_PATH = 'datasets/data_path/citypersons.train'
OUT_PATH = DATA_PATH + 'annotations/'

def load_paths(data_path):
    with open(data_path, 'r') as file:
        img_files = file.readlines()
        img_files = [x.replace('\n', '') for x in img_files]
        img_files = list(filter(lambda x: len(x) > 0, img_files))
    label_files = [x.replace('images', 'labels_with_ids').replace('.png', '.txt').replace('.jpg', '.txt') for x in img_files]
    return img_files, label_files                    

if __name__ == '__main__':
    if not os.path.exists(OUT_PATH):
        os.mkdir(OUT_PATH)

    out_path = OUT_PATH + 'train.json'
    out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}
    img_paths, label_paths = load_paths(DATA_FILE_PATH)
    image_cnt = 0
    ann_cnt = 0
    video_cnt = 0
    for img_path, label_path in zip(img_paths, label_paths):
        image_cnt += 1
        im = Image.open(os.path.join("datasets", img_path))
        image_info = {'file_name': img_path, 
                        'id': image_cnt,
                        'height': im.size[1], 
                        'width': im.size[0]}
        out['images'].append(image_info)
        # Load labels
        if os.path.isfile(os.path.join("datasets", label_path)):
            labels0 = np.loadtxt(os.path.join("datasets", label_path), dtype=np.float32).reshape(-1, 6)
            # normalized (cx, cy, w, h) to pixel COCO (x_topleft, y_topleft, w, h)
            labels = labels0.copy()
            labels[:, 2] = image_info['width'] * (labels0[:, 2] - labels0[:, 4] / 2)
            labels[:, 3] = image_info['height'] * (labels0[:, 3] - labels0[:, 5] / 2)
            labels[:, 4] = image_info['width'] * labels0[:, 4]
            labels[:, 5] = image_info['height'] * labels0[:, 5]
        else:
            labels = np.array([])
        for i in range(len(labels)):
            ann_cnt += 1
            fbox = labels[i, 2:6].tolist()
            ann = {'id': ann_cnt,
                    'category_id': 1,
                    'image_id': image_cnt,
                    'track_id': -1,
                    'bbox': fbox,
                    'area': fbox[2] * fbox[3],
                    'iscrowd': 0}
            out['annotations'].append(ann)
    print('loaded train for {} images and {} samples'.format(len(out['images']), len(out['annotations'])))
    json.dump(out, open(out_path, 'w'))
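
A worked instance of the conversion above (Cityscapes frames are 2048x1024; the row values are illustrative):

    # one labels_with_ids row is (class, track_id, cx, cy, w, h), all normalized:
    #   (0, 5, 0.5, 0.4, 0.1, 0.2) on a 2048x1024 frame becomes the pixel
    #   COCO bbox [x_topleft, y_topleft, w, h]:
    #   x = 2048 * (0.5 - 0.1/2) = 921.6
    #   y = 1024 * (0.4 - 0.2/2) = 307.2
    #   w = 2048 * 0.1           = 204.8
    #   h = 1024 * 0.2           = 204.8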


================================================
FILE: tools/convert_crowdhuman_to_coco.py
================================================
import os
import numpy as np
import json
from PIL import Image

DATA_PATH = 'datasets/crowdhuman/'
OUT_PATH = DATA_PATH + 'annotations/'
SPLITS = ['val', 'train']
DEBUG = False

def load_func(fpath):
    print('fpath', fpath)
    assert os.path.exists(fpath)
    with open(fpath, 'r') as fid:
        lines = fid.readlines()
    records = [json.loads(line.strip('\n')) for line in lines]
    return records

if __name__ == '__main__':
    if not os.path.exists(OUT_PATH):
        os.mkdir(OUT_PATH)
    for split in SPLITS:
        data_path = DATA_PATH + split
        out_path = OUT_PATH + '{}.json'.format(split)
        out = {'images': [], 'annotations': [], 'categories': [{'id': 1, 'name': 'person'}]}
        ann_path = DATA_PATH + 'annotation_{}.odgt'.format(split)
        anns_data = load_func(ann_path)
        image_cnt = 0
        ann_cnt = 0
        video_cnt = 0
        for ann_data in anns_data:
            image_cnt += 1
            file_path = DATA_PATH + 'CrowdHuman_{}/Images/'.format(split) + '{}.jpg'.format(ann_data['ID'])
            im = Image.open(file_path)
            image_info = {'file_name': '{}.jpg'.format(ann_data['ID']), 
                          'id': image_cnt,
                          'height': im.size[1], 
                          'width': im.size[0]}
            out['images'].append(image_info)
            if split != 'test':
                anns = ann_data['gtboxes']
                for i in range(len(anns)):
                    ann_cnt += 1
                    fbox = anns[i]['fbox']
                    ann = {'id': ann_cnt,
                         'category_id': 1,
                         'image_id': image_cnt,
                         'track_id': -1,
                         'bbox_vis': anns[i]['vbox'],
                         'bbox': fbox,
                         'area': fbox[2] * fbox[3],
                         'iscrowd': 1 if 'extra' in anns[i] and \
                                         'ignore' in anns[i]['extra'] and \
                                         anns[i]['extra']['ignore'] == 1 else 0}
                    out['annotations'].append(ann)
        print('loaded {} for {} images and {} samples'.format(split, len(out['images']), len(out['annotations'])))
        json.dump(out, open(out_path, 'w'))
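
For reference, each line of the annotation_{split}.odgt files parsed by load_func above is one standalone JSON record; schematically (field names as consumed above, values illustrative):

    # {"ID": "some_image_id",
    #  "gtboxes": [
    #    {"tag": "person",
    #     "fbox": [x, y, w, h],     # full-body box  -> 'bbox'
    #     "vbox": [x, y, w, h],     # visible box    -> 'bbox_vis'
    #     "extra": {"ignore": 1}},  # -> the iscrowd flag above
    #    ...
    #  ]}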

================================================
FILE: tools/convert_dancetrack_to_coco.py
================================================
import os
import numpy as np
import json
import cv2


# Use the same script for MOT16
DATA_PATH = 'datasets/dancetrack'
OUT_PATH = os.path.join(DATA_PATH, 'annotations')
SPLITS = ['train', 'test']  # training data can additionally be split into train_half and val_half
HALF_VIDEO = True
CREATE_SPLITTED_ANN = True
CREATE_SPLITTED_DET = True


if __name__ == '__main__':

    if not os.path.exists(OUT_PATH):
        os.makedirs(OUT_PATH)

    for split in SPLITS:
        if split == "test":
            data_path = os.path.join(DATA_PATH, 'test')
        else:
            data_path = os.path.join(DATA_PATH, 'train')
        out_path = os.path.join(OUT_PATH, '{}.json'.format(split))
        out = {'images': [], 'annotations': [], 'videos': [],
               'categories'
        # [remainder of this file is cut off by the preview limit]
================================================
SYMBOL INDEX (546 symbols across 79 files)
================================================

FILE: diffusion/models/diffusion_head.py
  function exists (line 21) | def exists(x):
  function default (line 25) | def default(val, d):
  function extract (line 31) | def extract(a, t, x_shape):
  function cosine_beta_schedule (line 38) | def cosine_beta_schedule(timesteps, s=0.008):
  class DiffusionHead (line 50) | class DiffusionHead(nn.Module):
    method __init__ (line 55) | def __init__(self,
    method predict_noise_from_start (line 155) | def predict_noise_from_start(self, x_t, t, x0):
    method model_predictions (line 161) | def model_predictions(self, backbone_feats,images_whwh,x,t,lost_featur...
    method new_ddim_sample (line 189) | def new_ddim_sample(self,backbone_feats,images_whwh,ref_targets=None,d...
    method q_sample (line 354) | def q_sample(self, x_start, t, noise=None):
    method forward (line 363) | def forward(self,features,mate_info,targets=None):
    method prepare_diffusion_repeat (line 395) | def prepare_diffusion_repeat(self,gt_boxes,t,ref_repeat_tensor=None):
    method prepare_diffusion_concat (line 436) | def prepare_diffusion_concat(self,gt_boxes,t,ref_mask=None):
    method prepare_targets (line 489) | def prepare_targets(self,targets,images_whwh):

FILE: diffusion/models/diffusion_losses.py
  class SetCriterionDynamicK (line 11) | class SetCriterionDynamicK(nn.Module):
    method __init__ (line 17) | def __init__(self,num_classes, matcher, weight_dict, eos_coef, losses,...
    method get_fed_loss_classes (line 53) | def get_fed_loss_classes(self, gt_classes, num_fed_loss_classes, num_c...
    method loss_labels (line 80) | def loss_labels(self, outputs, targets, indices, num_boxes, log=False):
    method loss_boxes (line 153) | def loss_boxes(self, outputs, targets, indices, num_boxes):
    method _get_src_permutation_idx (line 203) | def _get_src_permutation_idx(self, indices):
    method _get_tgt_permutation_idx (line 209) | def _get_tgt_permutation_idx(self, indices):
    method get_loss (line 215) | def get_loss(self, loss, outputs, targets, indices, num_boxes, **kwargs):
    method forward (line 223) | def forward(self, outputs, targets):
  class HungarianMatcherDynamicK (line 266) | class HungarianMatcherDynamicK(nn.Module):
    method __init__ (line 272) | def __init__(self,  cost_class: float = 1, cost_bbox: float = 1, cost_...
    method forward (line 291) | def forward(self, outputs, targets):
    method get_in_boxes_info (line 399) | def get_in_boxes_info(self, boxes, target_gts, expanded_strides):
    method dynamic_k_matching (line 430) | def dynamic_k_matching(self, cost, pair_wise_ious, num_gt):

FILE: diffusion/models/diffusion_models.py
  function exists (line 14) | def exists(val):
  class SinusoidalPositionEmbeddings (line 24) | class SinusoidalPositionEmbeddings(nn.Module):
    method __init__ (line 25) | def __init__(self, dim):
    method forward (line 29) | def forward(self, time):
  class GaussianFourierProjection (line 39) | class GaussianFourierProjection(nn.Module):
    method __init__ (line 42) | def __init__(self, embed_dim, scale=30.):
    method forward (line 48) | def forward(self, x):
  class Dense (line 53) | class Dense(nn.Module):
    method __init__ (line 56) | def __init__(self, input_dim, output_dim):
    method forward (line 60) | def forward(self, x):
  class DynamicHead (line 64) | class DynamicHead(nn.Module):
    method __init__ (line 66) | def __init__(self,
    method _reset_parameters (line 112) | def _reset_parameters(self):
    method _init_box_pooler (line 124) | def _init_box_pooler(pooler_resolution,strides,in_channels):
    method forward (line 143) | def forward(self,features,init_bboxes,t,lost_features=None,fix_ref_box...
  class RCNNHead (line 168) | class RCNNHead(nn.Module):
    method __init__ (line 170) | def __init__(self,d_model, num_classes, pooler_resolution,dim_feedforw...
    method forward (line 248) | def forward(self, features,bboxes,pro_features,pooler,time_emb,lost_fe...
    method apply_deltas (line 385) | def apply_deltas(self, deltas, boxes):
  class SFT (line 426) | class SFT(nn.Module):
    method __init__ (line 428) | def __init__(self, hidden_dim, pooler_resolution,dim_dynamic=2*64,num_...
    method forward (line 448) | def forward(self,roi_features,pro_features):
  class PerceiverAttention (line 477) | class PerceiverAttention(nn.Module):
    method __init__ (line 478) | def __init__(self, *, dim, dim_head=64, heads=8):
    method forward (line 491) | def forward(self, x, latents):
  function FeedForward (line 520) | def FeedForward(dim, mult=4):
  class WindowAttention (line 573) | class WindowAttention(nn.Module):
    method __init__ (line 586) | def __init__(self, dim, window_size, num_heads, qkv_bias=True, qk_scal...
    method forward (line 606) | def forward(self,q,k,v,H,W):
  function _get_clones (line 657) | def _get_clones(module, N):
  function _get_activation_fn (line 661) | def _get_activation_fn(activation):
  function window_partition (line 671) | def window_partition(x, window_size):
  function window_reverse (line 685) | def window_reverse(windows, window_size, H, W):

FILE: diffusion/models/diffusionnet.py
  class DiffusionNet (line 13) | class DiffusionNet(nn.Module):
    method __init__ (line 18) | def __init__(self, backbone=None, head=None, act="silu"):
    method forward (line 34) | def forward(self, x, targets=(None,None),random_flip=False,input_size=...

FILE: exps/default/nano.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_model (line 22) | def get_model(self, sublinear=False):

FILE: exps/default/yolov3.py
  class Exp (line 12) | class Exp(MyExp):
    method __init__ (line 13) | def __init__(self):
    method get_model (line 19) | def get_model(self, sublinear=False):
    method get_data_loader (line 35) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):

FILE: exps/default/yolox_l.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: exps/default/yolox_m.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: exps/default/yolox_s.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: exps/default/yolox_tiny.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: exps/default/yolox_x.py
  class Exp (line 10) | class Exp(MyExp):
    method __init__ (line 11) | def __init__(self):

FILE: exps/example/mot/yolox_x_diffusion_det_dancetrack.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 38) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 100) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 133) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 149) | def get_model(self):
    method get_optimizer (line 171) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_det_mot17.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 38) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 100) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 133) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 149) | def get_model(self):
    method get_optimizer (line 171) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 38) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 100) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 133) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 149) | def get_model(self):
    method get_optimizer (line 171) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_det_mot20.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 38) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 100) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 133) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 149) | def get_model(self):
    method get_optimizer (line 171) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_mot17.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 38) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 102) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 135) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 151) | def get_model(self):
    method get_optimizer (line 173) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_mot17_baseline.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_mot20.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: exps/example/mot/yolox_x_diffusion_track_mot20_baseline.py
  class Exp (line 11) | class Exp(MyExp):
    method __init__ (line 12) | def __init__(self):
    method get_data_loader (line 37) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method get_eval_loader (line 101) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 134) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method get_model (line 150) | def get_model(self):
    method get_optimizer (line 172) | def get_optimizer(self, batch_size):

FILE: setup.py
  function get_extensions (line 15) | def get_extensions():

FILE: tools/convert_bdd100k_to_coco.py
  function read_tid_num_per_video (line 41) | def read_tid_num_per_video(video_ann_dir):

FILE: tools/convert_cityperson_to_coco.py
  function load_paths (line 10) | def load_paths(data_path):

FILE: tools/convert_crowdhuman_to_coco.py
  function load_func (line 11) | def load_func(fpath):

FILE: tools/convert_ethz_to_coco.py
  function load_paths (line 10) | def load_paths(data_path):

FILE: tools/convert_kitti_to_coco.py
  function project_to_image (line 42) | def project_to_image(pts_3d, P):
  function read_clib (line 52) | def read_clib(calib_path):
  function _bbox_to_coco_bbox (line 60) | def _bbox_to_coco_bbox(bbox):

FILE: tools/convert_video.py
  function convert_video (line 3) | def convert_video(video_path):

FILE: tools/mota.py
  function compare_dataframes (line 31) | def compare_dataframes(gts, ts):

FILE: tools/track.py
  function make_parser (line 33) | def make_parser():
  function compare_dataframes (line 127) | def compare_dataframes(gts, ts):
  function main (line 142) | def main(exp, args, num_gpu):

FILE: tools/train.py
  function make_parser (line 24) | def make_parser():
  function main (line 95) | def main(exp, args):

FILE: tools/txt2video.py
  function colormap (line 9) | def colormap(rgb=False):
  function txt2img (line 99) | def txt2img(visual_path="visual_val_gt"):
  function img2video (line 189) | def img2video(visual_path="visual_val_gt"):

FILE: yolox/core/launch.py
  function _find_free_port (line 25) | def _find_free_port():
  function launch (line 40) | def launch(
  function launch_by_subprocess (line 93) | def launch_by_subprocess(
  function _distributed_worker (line 168) | def _distributed_worker(

FILE: yolox/core/trainer.py
  class Trainer (line 32) | class Trainer:
    method __init__ (line 33) | def __init__(self, exp, args):
    method train (line 70) | def train(self):
    method train_in_epoch (line 79) | def train_in_epoch(self):
    method train_in_iter (line 85) | def train_in_iter(self):
    method train_one_iter (line 91) | def train_one_iter(self):
    method before_train (line 128) | def before_train(self):
    method after_train (line 184) | def after_train(self):
    method before_epoch (line 191) | def before_epoch(self):
    method after_epoch (line 208) | def after_epoch(self):
    method before_iter (line 219) | def before_iter(self):
    method after_iter (line 222) | def after_iter(self):
    method progress_in_iter (line 268) | def progress_in_iter(self):
    method resume_train (line 271) | def resume_train(self, model):
    method evaluate_and_save_model (line 304) | def evaluate_and_save_model(self):
    method save_ckpt (line 320) | def save_ckpt(self, ckpt_name, update_best_ckpt=False):

FILE: yolox/data/data_augment.py
  function augment_hsv (line 23) | def augment_hsv(img, hgain=0.015, sgain=0.7, vgain=0.4):
  function box_candidates (line 39) | def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2):
  function random_perspective (line 54) | def random_perspective(
  function _distort (line 150) | def _distort(image):
  function _mirror (line 180) | def _mirror(image, boxes):
  function preproc (line 189) | def preproc(image, input_size, mean, std, swap=(2, 0, 1)):
  class TrainTransform (line 214) | class TrainTransform:
    method __init__ (line 215) | def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):
    method __call__ (line 221) | def __call__(self, image, targets, input_dim):
  class DiffusionValTransform (line 272) | class DiffusionValTransform:
    method __init__ (line 273) | def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):
    method __call__ (line 279) | def __call__(self, image, targets, input_dim):
  class DiffusionTrainTransform (line 313) | class DiffusionTrainTransform:
    method __init__ (line 314) | def __init__(self, p=0.5, rgb_means=None, std=None, max_labels=100):
    method __call__ (line 320) | def __call__(self, ref_image, ref_targets, track_image, track_targets,...
  class ValTransform (line 390) | class ValTransform:
    method __init__ (line 408) | def __init__(self, rgb_means=None, std=None, swap=(2, 0, 1)):
    method __call__ (line 414) | def __call__(self, img, res, input_size):

FILE: yolox/data/data_prefetcher.py
  class DataPrefetcher (line 13) | class DataPrefetcher:
    method __init__ (line 21) | def __init__(self, loader,task):
    method preload (line 28) | def preload(self):
    method next (line 50) | def next(self):
    method _record_stream_for_image (line 73) | def _record_stream_for_image(input):
  function random_resize (line 77) | def random_resize(data_loader, exp, epoch, rank, is_distributed):

FILE: yolox/data/dataloading.py
  function get_yolox_datadir (line 15) | def get_yolox_datadir():
  class DataLoader (line 29) | class DataLoader(torchDataLoader):
    method __init__ (line 72) | def __init__(self, *args, **kwargs):
    method close_mosaic (line 120) | def close_mosaic(self):
    method change_input_dim (line 123) | def change_input_dim(self, multiple=32, random_range=(10, 19)):
  function list_collate (line 164) | def list_collate(batch):

FILE: yolox/data/samplers.py
  class YoloBatchSampler (line 14) | class YoloBatchSampler(torchBatchSampler):
    method __init__ (line 21) | def __init__(self, *args, input_dimension=None, mosaic=True, **kwargs):
    method __iter__ (line 27) | def __iter__(self):
    method __set_input_dim (line 33) | def __set_input_dim(self):
  class InfiniteSampler (line 40) | class InfiniteSampler(Sampler):
    method __init__ (line 51) | def __init__(
    method __iter__ (line 79) | def __iter__(self):
    method _infinite_indices (line 85) | def _infinite_indices(self):
    method __len__ (line 94) | def __len__(self):

FILE: yolox/evaluators/coco_evaluator.py
  class COCOEvaluator (line 28) | class COCOEvaluator:
    method __init__ (line 34) | def __init__(
    method evaluate (line 55) | def evaluate(
    method convert_to_coco_format (line 142) | def convert_to_coco_format(self, outputs, info_imgs, ids):
    method evaluate_prediction (line 174) | def evaluate_prediction(self, data_dict, statistics):

FILE: yolox/evaluators/diffusion_mot_evaluator.py
  function write_results (line 27) | def write_results(filename, results):
  function write_results_no_score (line 40) | def write_results_no_score(filename, results):
  class DiffusionMOTEvaluator (line 53) | class DiffusionMOTEvaluator:
    method __init__ (line 59) | def __init__(
    method evaluate (line 80) | def evaluate(
    method convert_to_coco_format (line 253) | def convert_to_coco_format(self, output, info_imgs, ids):
    method evaluate_prediction (line 283) | def evaluate_prediction(self, data_dict, statistics):

FILE: yolox/evaluators/diffusion_mot_evaluator_kl.py
  function write_results (line 29) | def write_results(filename, results):
  function write_results_no_score (line 42) | def write_results_no_score(filename, results):
  class DiffusionMOTEvaluatorKL (line 55) | class DiffusionMOTEvaluatorKL:
    method __init__ (line 61) | def __init__(
    method evaluate (line 82) | def evaluate(
    method convert_to_coco_format (line 224) | def convert_to_coco_format(self, output, info_imgs, ids):
    method evaluate_prediction (line 254) | def evaluate_prediction(self, data_dict, statistics):

FILE: yolox/evaluators/evaluation.py
  class Evaluator (line 8) | class Evaluator(object):
    method __init__ (line 10) | def __init__(self, data_root, seq_name, data_type):
    method load_annotations (line 18) | def load_annotations(self):
    method reset_accumulator (line 25) | def reset_accumulator(self):
    method eval_frame (line 28) | def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
    method eval_file (line 76) | def eval_file(self, filename):
    method get_summary (line 90) | def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', '...
    method save_summary (line 107) | def save_summary(summary, filename):
  function read_results (line 117) | def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
  function read_mot_results (line 144) | def read_mot_results(filename, is_gt, is_ignore):
  function unzip_objs (line 193) | def unzip_objs(objs):
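
The Evaluator here wraps motmetrics (the file pins mm.lap.default_solver = 'lap'), accumulating per-frame matches and then computing CLEAR-MOT summaries. A small self-contained sketch of that flow, with made-up boxes:

    import numpy as np
    import motmetrics as mm

    acc = mm.MOTAccumulator(auto_id=True)
    # Per frame: gt ids, hypothesis ids, and a 1-IoU distance matrix (tlwh boxes).
    dist = mm.distances.iou_matrix(np.array([[0., 0., 10., 10.]]),
                                   np.array([[1., 1., 10., 10.]]), max_iou=0.5)
    acc.update([1], [7], dist)
    mh = mm.metrics.create()
    print(mh.compute(acc, metrics=["mota", "num_switches"], name="demo"))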

FILE: yolox/exp/base_exp.py
  class BaseExp (line 17) | class BaseExp(metaclass=ABCMeta):
    method __init__ (line 20) | def __init__(self):
    method get_model (line 27) | def get_model(self) -> Module:
    method get_data_loader (line 31) | def get_data_loader(
    method get_optimizer (line 37) | def get_optimizer(self, batch_size: int) -> torch.optim.Optimizer:
    method get_lr_scheduler (line 41) | def get_lr_scheduler(
    method get_evaluator (line 47) | def get_evaluator(self):
    method eval (line 51) | def eval(self, model, evaluator, weights):
    method __repr__ (line 54) | def __repr__(self):
    method merge (line 63) | def merge(self, cfg_list):

FILE: yolox/exp/build.py
  function get_exp_by_file (line 10) | def get_exp_by_file(exp_file):
  function get_exp_by_name (line 20) | def get_exp_by_name(exp_name):
  function get_exp (line 38) | def get_exp(exp_file, exp_name):
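
get_exp dispatches to get_exp_by_file when a path is given and to get_exp_by_name otherwise, returning an instantiated Exp. A typical call site, assuming yolox.exp re-exports get_exp as upstream YOLOX does:

    from yolox.exp import get_exp

    # Load an experiment from a file path (exp_name left as None), then build its model.
    exp = get_exp("exps/example/mot/yolox_x_diffusion_track_mot17.py", None)
    model = exp.get_model()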

FILE: yolox/exp/yolox_base.py
  class Exp (line 15) | class Exp(BaseExp):
    method __init__ (line 16) | def __init__(self):
    method get_model (line 63) | def get_model(self):
    method get_data_loader (line 82) | def get_data_loader(self, batch_size, is_distributed, no_aug=False):
    method random_resize (line 141) | def random_resize(self, data_loader, epoch, rank, is_distributed):
    method get_optimizer (line 160) | def get_optimizer(self, batch_size):
    method get_lr_scheduler (line 188) | def get_lr_scheduler(self, lr, iters_per_epoch):
    method get_eval_loader (line 203) | def get_eval_loader(self, batch_size, is_distributed, testdev=False):
    method get_evaluator (line 234) | def get_evaluator(self, batch_size, is_distributed, testdev=False):
    method eval (line 248) | def eval(self, model, evaluator, is_distributed, half=False):

FILE: yolox/layers/csrc/cocoeval/cocoeval.cpp
  type COCOeval (line 10) | namespace COCOeval {
    function SortInstancesByDetectionScore (line 16) | void SortInstancesByDetectionScore(
    function SortInstancesByIgnore (line 32) | void SortInstancesByIgnore(
    function MatchDetectionsToGroundTruth (line 59) | void MatchDetectionsToGroundTruth(
    function EvaluateImages (line 140) | std::vector<ImageEvaluation> EvaluateImages(
    function list_to_vec (line 201) | std::vector<T> list_to_vec(const py::list& l) {
    function BuildSortedDetectionList (line 221) | int BuildSortedDetectionList(
    function ComputePrecisionRecallCurve (line 282) | void ComputePrecisionRecallCurve(
    function Accumulate (line 370) | py::dict Accumulate(

FILE: yolox/layers/csrc/cocoeval/cocoeval.h
  type COCOeval (line 12) | namespace COCOeval {

FILE: yolox/layers/csrc/vision.cpp
  function PYBIND11_MODULE (line 3) | PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {

FILE: yolox/layers/fast_coco_eval_api.py
  class COCOeval_opt (line 19) | class COCOeval_opt(COCOeval):
    method evaluate (line 25) | def evaluate(self):
    method accumulate (line 124) | def accumulate(self):

FILE: yolox/models/darknet.py
  class Darknet (line 10) | class Darknet(nn.Module):
    method __init__ (line 14) | def __init__(
    method diffusion_freeze (line 60) | def diffusion_freeze(self):
    method make_group_layer (line 66) | def make_group_layer(self, in_channels: int, num_blocks: int, stride: ...
    method make_spp_block (line 73) | def make_spp_block(self, filters_list, in_filters):
    method forward (line 89) | def forward(self, x):
  class CSPDarknet (line 104) | class CSPDarknet(nn.Module):
    method __init__ (line 105) | def __init__(
    method forward (line 174) | def forward(self, x):

FILE: yolox/models/losses.py
  class IOUloss (line 10) | class IOUloss(nn.Module):
    method __init__ (line 11) | def __init__(self, reduction="none", loss_type="iou"):
    method forward (line 16) | def forward(self, pred, target):
  function sigmoid_focal_loss (line 56) | def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, ...
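
sigmoid_focal_loss follows the standard RetinaNet formulation: cross-entropy re-weighted by (1 - p_t)^gamma so easy examples contribute little, with an alpha class balance. A self-contained sketch of that math (the repo's version additionally normalizes by its num_boxes argument):

    import torch
    import torch.nn.functional as F

    def focal_loss(inputs, targets, alpha=0.25, gamma=2.0):
        # inputs: raw logits; targets: float 0/1 tensor of the same shape.
        p = torch.sigmoid(inputs)
        ce = F.binary_cross_entropy_with_logits(inputs, targets, reduction="none")
        p_t = p * targets + (1 - p) * (1 - targets)   # prob. of the true class
        loss = ce * (1 - p_t) ** gamma                # focus on hard examples
        if alpha >= 0:
            loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
        return loss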

FILE: yolox/models/network_blocks.py
  class SiLU (line 9) | class SiLU(nn.Module):
    method forward (line 13) | def forward(x):
  function get_activation (line 17) | def get_activation(name="silu", inplace=True):
  class BaseConv (line 29) | class BaseConv(nn.Module):
    method __init__ (line 32) | def __init__(
    method forward (line 50) | def forward(self, x):
    method fuseforward (line 53) | def fuseforward(self, x):
  class DWConv (line 57) | class DWConv(nn.Module):
    method __init__ (line 60) | def __init__(self, in_channels, out_channels, ksize, stride=1, act="si...
    method forward (line 74) | def forward(self, x):
  class Bottleneck (line 79) | class Bottleneck(nn.Module):
    method __init__ (line 81) | def __init__(
    method forward (line 97) | def forward(self, x):
  class ResLayer (line 104) | class ResLayer(nn.Module):
    method __init__ (line 107) | def __init__(self, in_channels: int):
    method forward (line 117) | def forward(self, x):
  class SPPBottleneck (line 122) | class SPPBottleneck(nn.Module):
    method __init__ (line 125) | def __init__(
    method forward (line 140) | def forward(self, x):
  class CSPLayer (line 147) | class CSPLayer(nn.Module):
    method __init__ (line 150) | def __init__(
    method forward (line 180) | def forward(self, x):
  class Focus (line 188) | class Focus(nn.Module):
    method __init__ (line 191) | def __init__(self, in_channels, out_channels, ksize=1, stride=1, act="...
    method forward (line 195) | def forward(self, x):
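
Focus is the space-to-depth stem: it samples every other pixel in four phases and stacks them on the channel axis, turning (B, C, H, W) into (B, 4C, H/2, W/2) before the first conv. The slicing pattern, as used in YOLOX:

    import torch

    def focus_slice(x):
        # Four interleaved 2x2 phases, concatenated along channels.
        tl = x[..., ::2, ::2]
        bl = x[..., 1::2, ::2]
        tr = x[..., ::2, 1::2]
        br = x[..., 1::2, 1::2]
        return torch.cat([tl, bl, tr, br], dim=1)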

FILE: yolox/models/yolo_fpn.py
  class YOLOFPN (line 12) | class YOLOFPN(nn.Module):
    method __init__ (line 17) | def __init__(
    method _make_cbl (line 38) | def _make_cbl(self, _in, _out, ks):
    method _make_embedding (line 41) | def _make_embedding(self, filters_list, in_filters):
    method load_pretrained_model (line 53) | def load_pretrained_model(self, filename="./weights/darknet53.mix.pth"):
    method forward (line 59) | def forward(self, inputs):

FILE: yolox/models/yolo_head.py
  class YOLOXHead (line 19) | class YOLOXHead(nn.Module):
    method __init__ (line 20) | def __init__(
    method initialize_biases (line 134) | def initialize_biases(self, prior_prob):
    method forward (line 145) | def forward(self, xin, labels=None, imgs=None):
    method get_output_and_grid (line 218) | def get_output_and_grid(self, output, k, stride, dtype):
    method decode_outputs (line 238) | def decode_outputs(self, outputs, dtype):
    method get_losses (line 255) | def get_losses(
    method get_l1_target (line 429) | def get_l1_target(self, l1_target, gt, stride, x_shifts, y_shifts, eps...
    method get_assignments (line 437) | def get_assignments(
    method get_in_boxes_info (line 536) | def get_in_boxes_info(
    method dynamic_k_matching (line 627) | def dynamic_k_matching(self, cost, pair_wise_ious, gt_classes, num_gt,...
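
dynamic_k_matching implements the SimOTA assignment step: each ground truth gets a dynamic k (the sum of its top-10 candidate IoUs, at least 1) and claims its k lowest-cost anchors. An illustrative sketch of that core loop (simplified; the real method also resolves anchors claimed by several ground truths):

    import torch

    def simota_matching(cost, pair_wise_ious, n_candidate_k=10):
        # cost, pair_wise_ious: (num_gt, num_anchors)
        matching = torch.zeros_like(cost, dtype=torch.bool)
        topk_ious, _ = torch.topk(pair_wise_ious,
                                  min(n_candidate_k, cost.shape[1]), dim=1)
        dynamic_ks = torch.clamp(topk_ious.sum(1).int(), min=1)
        for g in range(cost.shape[0]):
            _, idx = torch.topk(cost[g], k=int(dynamic_ks[g]), largest=False)
            matching[g, idx] = True
        return matching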

FILE: yolox/models/yolo_pafpn.py
  class YOLOPAFPN (line 12) | class YOLOPAFPN(nn.Module):
    method __init__ (line 17) | def __init__(
    method forward (line 84) | def forward(self, input):

FILE: yolox/models/yolox.py
  class YOLOX (line 10) | class YOLOX(nn.Module):
    method __init__ (line 17) | def __init__(self, backbone=None, head=None):
    method forward (line 27) | def forward(self, x, targets=None):

FILE: yolox/tracker/basetrack.py
  class TrackState (line 5) | class TrackState(object):
  class BaseTrack (line 12) | class BaseTrack(object):
    method end_frame (line 31) | def end_frame(self):
    method next_id (line 35) | def next_id():
    method activate (line 39) | def activate(self, *args):
    method predict (line 42) | def predict(self):
    method update (line 45) | def update(self, *args, **kwargs):
    method mark_lost (line 48) | def mark_lost(self):
    method mark_removed (line 51) | def mark_removed(self):

FILE: yolox/tracker/diffusion_tracker.py
  class DiffusionTracker (line 17) | class DiffusionTracker(object):
    method __init__ (line 18) | def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms...
    method update (line 49) | def update(self,cur_image):
    method get_results (line 181) | def get_results(self):
    method extract_feature (line 191) | def extract_feature(self,cur_image):
    method extract_mean_track_t (line 199) | def extract_mean_track_t(self,pre_box,cur_box):
    method diffusion_postprocess (line 209) | def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=...
    method diffusion_track_filt (line 257) | def diffusion_track_filt(self,ref_detections,track_detections,conf_thr...
    method diffusion_det_filt (line 274) | def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thr...
    method diffusion_matching (line 290) | def diffusion_matching(self,ref_bboxes,ref_track_ids,diffusion_pre_tra...
    method proposal_schedule (line 300) | def proposal_schedule(self,num_ref_bboxes):
    method sampling_steps_schedule (line 304) | def sampling_steps_schedule(self,num_ref_bboxes):
    method vote_to_remove_candidate (line 313) | def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=...
    method prepare_input (line 349) | def prepare_input(self,pre_features,cur_features):
    method get_targets_from_tracklet_db (line 357) | def get_targets_from_tracklet_db(self):
  function joint_stracks (line 364) | def joint_stracks(tlista, tlistb):
  function sub_stracks (line 378) | def sub_stracks(tlista, tlistb):
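
joint_stracks and sub_stracks are the usual ByteTrack-style list helpers: a union of two track lists deduplicated by track_id, and a difference keyed the same way. A sketch of that conventional behavior (illustrative names; any object with a track_id attribute works):

    def joint_stracks_sketch(tlista, tlistb):
        # Union; the first occurrence of each track_id wins.
        seen = {t.track_id: t for t in tlista}
        for t in tlistb:
            seen.setdefault(t.track_id, t)
        return list(seen.values())

    def sub_stracks_sketch(tlista, tlistb):
        # Tracks in tlista whose track_id does not appear in tlistb.
        drop = {t.track_id for t in tlistb}
        return [t for t in tlista if t.track_id not in drop]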

FILE: yolox/tracker/diffusion_tracker_kl.py
  class STrack (line 19) | class STrack(BaseTrack):
    method __init__ (line 21) | def __init__(self, tlwh, score):
    method predict (line 32) | def predict(self):
    method multi_predict (line 39) | def multi_predict(stracks):
    method activate (line 51) | def activate(self, kalman_filter, frame_id):
    method re_activate (line 65) | def re_activate(self, new_track, frame_id, new_id=False):
    method update (line 78) | def update(self, new_track, frame_id):
    method tlwh (line 100) | def tlwh(self):
    method tlbr (line 113) | def tlbr(self):
    method tlwh_to_xyah (line 123) | def tlwh_to_xyah(tlwh):
    method to_xyah (line 132) | def to_xyah(self):
    method tlbr_to_tlwh (line 137) | def tlbr_to_tlwh(tlbr):
    method tlwh_to_tlbr (line 144) | def tlwh_to_tlbr(tlwh):
    method __repr__ (line 149) | def __repr__(self):
  class DiffusionTracker (line 152) | class DiffusionTracker(object):
    method __init__ (line 153) | def __init__(self,model,tensor_type,conf_thresh=0.7,det_thresh=0.6,nms...
    method update (line 187) | def update(self,cur_image):
    method extract_feature (line 321) | def extract_feature(self,cur_image):
    method extract_mean_track_t (line 329) | def extract_mean_track_t(self,pre_box,cur_box):
    method diffusion_postprocess (line 342) | def diffusion_postprocess(self,diffusion_outputs,conf_scores,nms_thre=...
    method diffusion_track_filt (line 390) | def diffusion_track_filt(self,ref_detections,track_detections,conf_thr...
    method diffusion_det_filt (line 407) | def diffusion_det_filt(self,diffusion_detections,conf_thre=0.6,nms_thr...
    method proposal_schedule (line 423) | def proposal_schedule(self,num_ref_bboxes):
    method sampling_steps_schedule (line 427) | def sampling_steps_schedule(self,num_ref_bboxes):
    method vote_to_remove_candidate (line 436) | def vote_to_remove_candidate(self,track_ids,detections,vote_iou_thres=...
    method prepare_input (line 472) | def prepare_input(self,pre_features,cur_features):
  function joint_stracks (line 487) | def joint_stracks(tlista, tlistb):
  function sub_stracks (line 501) | def sub_stracks(tlista, tlistb):
  function remove_duplicate_stracks (line 512) | def remove_duplicate_stracks(stracksa, stracksb):

FILE: yolox/tracker/kalman_filter.py
  class KalmanFilter (line 23) | class KalmanFilter(object):
    method __init__ (line 40) | def __init__(self):
    method initiate (line 55) | def initiate(self, measurement):
    method predict (line 88) | def predict(self, mean, covariance):
    method project (line 126) | def project(self, mean, covariance):
    method multi_predict (line 155) | def multi_predict(self, mean, covariance):
    method update (line 194) | def update(self, mean, covariance, measurement):
    method gating_distance (line 228) | def gating_distance(self, mean, covariance, measurements,
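
This KalmanFilter is the DeepSORT/ByteTrack constant-velocity filter over an 8-dimensional state: box center, aspect ratio, and height (x, y, a, h) plus their velocities. The predict step is the textbook linear update, sketched here with a generic F and Q (the repo builds these internally from motion/observation standard deviations):

    import numpy as np

    def kf_predict(mean, covariance, F, Q):
        # x' = F x,  P' = F P F^T + Q
        return F @ mean, F @ covariance @ F.T + Q

    dt = 1.0
    F = np.eye(8)
    F[:4, 4:] = dt * np.eye(4)  # position += velocity * dt
    mean, cov = kf_predict(np.zeros(8), np.eye(8), F, 0.01 * np.eye(8))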

FILE: yolox/tracker/matching.py
  function merge_matches (line 10) | def merge_matches(m1, m2, shape):
  function _indices_to_matches (line 27) | def _indices_to_matches(cost_matrix, indices, thresh):
  function linear_assignment (line 38) | def linear_assignment(cost_matrix, thresh):
  function ious (line 52) | def ious(atlbrs, btlbrs):
  function iou_distance (line 72) | def iou_distance(atracks, btracks):
  function v_iou_distance (line 92) | def v_iou_distance(atracks, btracks):
  function embedding_distance (line 112) | def embedding_distance(tracks, detections, metric='cosine'):
  function fuse_iou (line 158) | def fuse_iou(cost_matrix, tracks, detections):
  function fuse_score (line 172) | def fuse_score(cost_matrix, scores):
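
linear_assignment (the file imports lap) most likely follows ByteTrack: lap.lapjv with extend_cost=True and cost_limit=thresh, returning matched pairs plus unmatched row and column indices. A self-contained sketch of the same contract using SciPy's Hungarian solver instead (thresholding after assignment, which can differ slightly from lapjv's built-in cost_limit):

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    def threshold_assignment(cost, thresh):
        rows, cols = linear_sum_assignment(cost)
        matches = [(r, c) for r, c in zip(rows, cols) if cost[r, c] <= thresh]
        matched_r = {r for r, _ in matches}
        matched_c = {c for _, c in matches}
        u_rows = [r for r in range(cost.shape[0]) if r not in matched_r]
        u_cols = [c for c in range(cost.shape[1]) if c not in matched_c]
        return matches, u_rows, u_cols

    cost = np.array([[0.1, 0.9], [0.8, 0.2]])
    print(threshold_assignment(cost, 0.5))  # ([(0, 0), (1, 1)], [], [])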

FILE: yolox/tracking_utils/evaluation.py
  class Evaluator (line 10) | class Evaluator(object):
    method __init__ (line 12) | def __init__(self, data_root, seq_name, data_type):
    method load_annotations (line 20) | def load_annotations(self):
    method reset_accumulator (line 27) | def reset_accumulator(self):
    method eval_frame (line 30) | def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
    method eval_file (line 78) | def eval_file(self, filename):
    method get_summary (line 92) | def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', '...
    method save_summary (line 109) | def save_summary(summary, filename):

FILE: yolox/tracking_utils/io.py
  function write_results (line 6) | def write_results(filename, results_dict: Dict, data_type: str):
  function read_results (line 33) | def read_results(filename, data_type: str, is_gt=False, is_ignore=False):
  function read_mot_results (line 60) | def read_mot_results(filename, is_gt, is_ignore):
  function unzip_objs (line 109) | def unzip_objs(objs):

FILE: yolox/tracking_utils/timer.py
  class Timer (line 4) | class Timer(object):
    method __init__ (line 6) | def __init__(self):
    method tic (line 15) | def tic(self):
    method toc (line 20) | def toc(self, average=True):
    method clear (line 31) | def clear(self):

FILE: yolox/utils/allreduce_norm.py
  function get_async_norm_states (line 32) | def get_async_norm_states(module):
  function pyobj2tensor (line 41) | def pyobj2tensor(pyobj, device="cuda"):
  function tensor2pyobj (line 47) | def tensor2pyobj(tensor):
  function _get_reduce_op (line 52) | def _get_reduce_op(op_name):
  function all_reduce (line 59) | def all_reduce(py_dict, op="sum", group=None):
  function all_reduce_norm (line 97) | def all_reduce_norm(module):

FILE: yolox/utils/box_ops.py
  function box_cxcywh_to_xyxy (line 9) | def box_cxcywh_to_xyxy(x):
  function box_xyxy_to_cxcywh (line 16) | def box_xyxy_to_cxcywh(x):
  function box_iou (line 24) | def box_iou(boxes1, boxes2):
  function generalized_box_iou (line 40) | def generalized_box_iou(boxes1,boxes2,boxes3,boxes4):
  function masks_to_boxes (line 80) | def masks_to_boxes(masks):

FILE: yolox/utils/boxes.py
  function filter_box (line 24) | def filter_box(output, scale_range):
  function postprocess (line 35) | def postprocess(prediction, num_classes, conf_thre=0.7, nms_thre=0.45):
  function diffusion_postprocess (line 76) | def diffusion_postprocess(pre_prediction,cur_prediction,conf_scores,conf...
  function bboxes_iou (line 127) | def bboxes_iou(bboxes_a, bboxes_b, xyxy=True):
  function matrix_iou (line 153) | def matrix_iou(a, b):
  function adjust_box_anns (line 166) | def adjust_box_anns(bbox, scale_ratio, padw, padh, w_max, h_max):
  function xyxy2xywh (line 174) | def xyxy2xywh(bboxes):
  function xyxy2cxcywh (line 180) | def xyxy2cxcywh(bboxes):
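
xyxy2xywh and xyxy2cxcywh convert corner-format boxes to COCO top-left-width-height and to center-width-height respectively. The arithmetic, sketched for NumPy arrays of shape (N, 4):

    import numpy as np

    def xyxy_to_tlwh(b):
        # (x1, y1, x2, y2) -> (x1, y1, w, h), the COCO/MOT layout.
        out = b.copy()
        out[:, 2] -= b[:, 0]
        out[:, 3] -= b[:, 1]
        return out

    def xyxy_to_cxcywh(b):
        # (x1, y1, x2, y2) -> (cx, cy, w, h).
        w, h = b[:, 2] - b[:, 0], b[:, 3] - b[:, 1]
        return np.stack([b[:, 0] + w / 2, b[:, 1] + h / 2, w, h], axis=1)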

FILE: yolox/utils/checkpoint.py
  function load_ckpt (line 12) | def load_ckpt(model, ckpt):
  function save_checkpoint (line 37) | def save_checkpoint(state, is_best, save_dir, model_name=""):
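
load_ckpt is typically shape-tolerant: it keeps only checkpoint entries whose key and tensor shape match the model, so resuming across small head changes does not crash. A sketch of that pattern (illustrative helper name, not the repo's API):

    def load_matching_weights(model, ckpt_state):
        model_state = model.state_dict()
        kept = {k: v for k, v in ckpt_state.items()
                if k in model_state and v.shape == model_state[k].shape}
        # strict=False leaves non-matching parameters at their current values.
        model.load_state_dict(kept, strict=False)
        return model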

FILE: yolox/utils/cluster_nms.py
  function intersect (line 4) | def intersect(box_a, box_b):
  function garea (line 25) | def garea(box_a, box_b):
  function get_box_area (line 46) | def get_box_area(box):
  function giou_3d (line 49) | def giou_3d(box_a,box_b,box_c,box_d):
  function cluster_nms (line 86) | def cluster_nms(boxes_a,boxes_c,scores,iou_threshold:float=0.5, top_k:in...

FILE: yolox/utils/demo_utils.py
  function mkdir (line 12) | def mkdir(path):
  function nms (line 17) | def nms(boxes, scores, nms_thr):
  function multiclass_nms (line 47) | def multiclass_nms(boxes, scores, nms_thr, score_thr):
  function demo_postprocess (line 71) | def demo_postprocess(outputs, img_size, p6=False):

FILE: yolox/utils/dist.py
  function synchronize (line 37) | def synchronize():
  function get_world_size (line 51) | def get_world_size() -> int:
  function get_rank (line 59) | def get_rank() -> int:
  function get_local_rank (line 67) | def get_local_rank() -> int:
  function get_local_size (line 80) | def get_local_size() -> int:
  function is_main_process (line 92) | def is_main_process() -> bool:
  function _get_global_gloo_group (line 97) | def _get_global_gloo_group():
  function _serialize_to_tensor (line 108) | def _serialize_to_tensor(data, group):
  function _pad_to_largest_tensor (line 126) | def _pad_to_largest_tensor(tensor, group):
  function all_gather (line 156) | def all_gather(data, group=None):
  function gather (line 194) | def gather(data, dst=0, group=None):
  function shared_random_seed (line 238) | def shared_random_seed():
  function time_synchronized (line 251) | def time_synchronized():
  function is_dist_avail_and_initialized (line 257) | def is_dist_avail_and_initialized():

FILE: yolox/utils/ema.py
  function is_parallel (line 11) | def is_parallel(model):
  function copy_attr (line 21) | def copy_attr(a, b, include=(), exclude=()):
  class ModelEMA (line 30) | class ModelEMA:
    method __init__ (line 41) | def __init__(self, model, decay=0.9999, updates=0):
    method update (line 56) | def update(self, model):
    method update_attr (line 70) | def update_attr(self, model, include=(), exclude=("process_group", "re...
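
ModelEMA keeps an exponential moving average of the weights with the warmup-ramped decay used by YOLOX and YOLOv5, d(t) = decay * (1 - exp(-t / 2000)), so the shadow model tracks the live model closely early in training. A compact sketch (illustrative class name):

    import copy
    import math
    import torch

    class TinyEMA:
        def __init__(self, model, decay=0.9999):
            self.ema = copy.deepcopy(model).eval()
            for p in self.ema.parameters():
                p.requires_grad_(False)
            self.decay, self.updates = decay, 0

        @torch.no_grad()
        def update(self, model):
            self.updates += 1
            d = self.decay * (1 - math.exp(-self.updates / 2000))
            msd = model.state_dict()
            for k, v in self.ema.state_dict().items():
                if v.dtype.is_floating_point:
                    v.mul_(d).add_(msd[k].detach(), alpha=1 - d)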

FILE: yolox/utils/logger.py
  function get_caller_name (line 12) | def get_caller_name(depth=0):
  class StreamToLoguru (line 28) | class StreamToLoguru:
    method __init__ (line 33) | def __init__(self, level="INFO", caller_names=("apex", "pycocotools")):
    method write (line 44) | def write(self, buf):
    method flush (line 54) | def flush(self):
  function redirect_sys_output (line 58) | def redirect_sys_output(log_level="INFO"):
  function setup_logger (line 64) | def setup_logger(save_dir, distributed_rank=0, filename="log.txt", mode=...

FILE: yolox/utils/lr_scheduler.py
  class LRScheduler (line 9) | class LRScheduler:
    method __init__ (line 10) | def __init__(self, name, lr, iters_per_epoch, total_epochs, **kwargs):
    method update_lr (line 33) | def update_lr(self, iters):
    method _get_lr_func (line 36) | def _get_lr_func(self, name):
  function cos_lr (line 97) | def cos_lr(lr, total_iters, iters):
  function warm_cos_lr (line 103) | def warm_cos_lr(lr, total_iters, warmup_total_iters, warmup_lr_start, it...
  function yolox_warm_cos_lr (line 121) | def yolox_warm_cos_lr(
  function yolox_semi_warm_cos_lr (line 151) | def yolox_semi_warm_cos_lr(
  function multistep_lr (line 201) | def multistep_lr(lr, milestones, gamma, iters):
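
yolox_warm_cos_lr combines a quadratic warmup to the base rate with a cosine decay afterwards; per upstream YOLOX, the full version also clamps to a minimum rate during the final no-augmentation epochs. The basic shape, sketched without those floors:

    import math

    def warm_cos(lr, total_iters, warmup_iters, warmup_lr_start, it):
        if it < warmup_iters:
            # Quadratic ramp from warmup_lr_start up to lr.
            return (lr - warmup_lr_start) * (it / warmup_iters) ** 2 + warmup_lr_start
        # Cosine decay from lr to 0 over the remaining iterations.
        t = (it - warmup_iters) / (total_iters - warmup_iters)
        return 0.5 * lr * (1 + math.cos(math.pi * t))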

FILE: yolox/utils/metric.py
  function get_total_and_free_memory_in_Mb (line 22) | def get_total_and_free_memory_in_Mb(cuda_device):
  function occupy_mem (line 31) | def occupy_mem(cuda_device, mem_ratio=0.95):
  function gpu_mem_usage (line 43) | def gpu_mem_usage():
  class AverageMeter (line 51) | class AverageMeter:
    method __init__ (line 56) | def __init__(self, window_size=50):
    method update (line 61) | def update(self, value):
    method median (line 67) | def median(self):
    method avg (line 72) | def avg(self):
    method global_avg (line 78) | def global_avg(self):
    method latest (line 82) | def latest(self):
    method total (line 86) | def total(self):
    method reset (line 89) | def reset(self):
    method clear (line 94) | def clear(self):
  class MeterBuffer (line 98) | class MeterBuffer(defaultdict):
    method __init__ (line 101) | def __init__(self, window_size=20):
    method reset (line 105) | def reset(self):
    method get_filtered_meter (line 109) | def get_filtered_meter(self, filter_key="time"):
    method update (line 112) | def update(self, values=None, **kwargs):
    method clear_meters (line 121) | def clear_meters(self):

FILE: yolox/utils/model_utils.py
  function get_model_info (line 19) | def get_model_info(model, tsize):
  function fuse_conv_and_bn (line 31) | def fuse_conv_and_bn(conv, bn):
  function fuse_model (line 66) | def fuse_model(model):
  function replace_module (line 77) | def replace_module(module, replaced_module_type, new_module_type, replac...

FILE: yolox/utils/setup_env.py
  function configure_nccl (line 13) | def configure_nccl():
  function configure_module (line 25) | def configure_module(ulimit_value=8192):

FILE: yolox/utils/visualize.py
  function vis (line 11) | def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
  function get_color (line 45) | def get_color(idx):
  function plot_tracking (line 52) | def plot_tracking(image, tlwhs, obj_ids, scores=None, frame_id=0, fps=0....

Condensed preview — 99 files, each showing path, character count, and a content snippet (full structured content: 588K chars).
[
  {
    "path": ".gitattributes",
    "chars": 125,
    "preview": "README.assets/MOT20.gif filter=lfs diff=lfs merge=lfs -text\nREADME.assets/dancetrack.gif filter=lfs diff=lfs merge=lfs -"
  },
  {
    "path": ".gitignore",
    "chars": 2030,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\ndatasets/*\n# Distribution"
  },
  {
    "path": "LICENSE",
    "chars": 19334,
    "preview": "\nAttribution-NonCommercial 4.0 International\n\n=======================================================================\n\nC"
  },
  {
    "path": "README.md",
    "chars": 9087,
    "preview": "## DiffusionTrack:Diffusion Model For Multi-Object Tracking\n\n**DiffusionTrack is the first work of diffusion model for m"
  },
  {
    "path": "diffusion/models/diffusion_head.py",
    "chars": 24515,
    "preview": "import math\nimport random\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn.functional as F\nfrom torch im"
  },
  {
    "path": "diffusion/models/diffusion_losses.py",
    "chars": 26352,
    "preview": "import torch\nimport torch.nn.functional as F\nfrom torch import nn\nfrom fvcore.nn import sigmoid_focal_loss_jit\nimport to"
  },
  {
    "path": "diffusion/models/diffusion_models.py",
    "chars": 29048,
    "preview": "import copy\nimport math\n\nimport numpy as np\nimport torch\nfrom torch import einsum, nn\nimport torch.nn.functional as F\n\nf"
  },
  {
    "path": "diffusion/models/diffusionnet.py",
    "chars": 3025,
    "preview": "import math\nimport random\nfrom typing import List\nfrom collections import namedtuple\n\nimport torch\nimport torch.nn.funct"
  },
  {
    "path": "exps/default/nano.py",
    "chars": 1320,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\nimport torch.n"
  },
  {
    "path": "exps/default/yolov3.py",
    "chars": 3006,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\nimport torch\ni"
  },
  {
    "path": "exps/default/yolox_l.py",
    "chars": 355,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.ex"
  },
  {
    "path": "exps/default/yolox_m.py",
    "chars": 357,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.ex"
  },
  {
    "path": "exps/default/yolox_s.py",
    "chars": 357,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.ex"
  },
  {
    "path": "exps/default/yolox_tiny.py",
    "chars": 496,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.ex"
  },
  {
    "path": "exps/default/yolox_x.py",
    "chars": 357,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport os\n\nfrom yolox.ex"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_dancetrack.py",
    "chars": 5757,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot17.py",
    "chars": 5745,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot17_ablation.py",
    "chars": 5753,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_det_mot20.py",
    "chars": 5752,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_dancetrack.py",
    "chars": 5816,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_dancetrack_baseline.py",
    "chars": 5814,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17.py",
    "chars": 5805,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17_ablation.py",
    "chars": 5843,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot17_baseline.py",
    "chars": 5800,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot20.py",
    "chars": 5808,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "exps/example/mot/yolox_x_diffusion_track_mot20_baseline.py",
    "chars": 5808,
    "preview": "# encoding: utf-8\nimport os\nimport random\nimport torch\nimport torch.nn as nn\nimport torch.distributed as dist\nfrom torch"
  },
  {
    "path": "requirements.txt",
    "chars": 144,
    "preview": "numpy\ntorch>=1.7\nopencv_python\nloguru\nscikit-image\ntqdm\ntorchvision>=0.10.0\nPillow\nthop\nninja\ntabulate\ntensorboard\nlap\nm"
  },
  {
    "path": "setup.py",
    "chars": 1696,
    "preview": "#!/usr/bin/env python\n# Copyright (c) Megvii, Inc. and its affiliates. All Rights Reserved\n\nimport re\nimport setuptools\n"
  },
  {
    "path": "tools/convert_bdd100k_to_coco.py",
    "chars": 4656,
    "preview": "import cv2\nimport os\nimport json\nimport tqdm\nimport numpy as np\n\nlabels_path = 'datasets/bdd100k/labels'\nimg_path = 'dat"
  },
  {
    "path": "tools/convert_cityperson_to_coco.py",
    "chars": 2482,
    "preview": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/Cityscapes/'\nDATA_FILE_PATH = 'dat"
  },
  {
    "path": "tools/convert_crowdhuman_to_coco.py",
    "chars": 2288,
    "preview": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/crowdhuman/'\nOUT_PATH = DATA_PATH "
  },
  {
    "path": "tools/convert_dancetrack_to_coco.py",
    "chars": 6209,
    "preview": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/dancetrack'\n"
  },
  {
    "path": "tools/convert_ethz_to_coco.py",
    "chars": 2468,
    "preview": "import os\nimport numpy as np\nimport json\nfrom PIL import Image\n\nDATA_PATH = 'datasets/ETHZ/'\nDATA_FILE_PATH = 'datasets/"
  },
  {
    "path": "tools/convert_kitti_to_coco.py",
    "chars": 7472,
    "preview": "from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport pic"
  },
  {
    "path": "tools/convert_mot17_to_coco.py",
    "chars": 6754,
    "preview": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/mot'\nOUT_PAT"
  },
  {
    "path": "tools/convert_mot20_to_coco.py",
    "chars": 6690,
    "preview": "import os\nimport numpy as np\nimport json\nimport cv2\n\n\n# Use the same script for MOT16\nDATA_PATH = 'datasets/MOT20'\nOUT_P"
  },
  {
    "path": "tools/convert_video.py",
    "chars": 859,
    "preview": "import cv2\n\ndef convert_video(video_path):\n    cap = cv2.VideoCapture(video_path)\n    width = cap.get(cv2.CAP_PROP_FRAME"
  },
  {
    "path": "tools/mix_data_ablation.py",
    "chars": 2491,
    "preview": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_mot_ch/annotations\ncp mot/annotations/val_half.json mix_mot_ch/anno"
  },
  {
    "path": "tools/mix_data_bdd100k.py",
    "chars": 1999,
    "preview": "import json\nimport os\nimport numpy as np\n\n\"\"\"\ncd datasets\nmkdir -p mix_det/annotations\ncp mot/annotations/val_half.json "
  },
  {
    "path": "tools/mix_data_test_mot17.py",
    "chars": 3933,
    "preview": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_det/annotations\ncp mot/annotations/val_half.json mix_det/annotation"
  },
  {
    "path": "tools/mix_data_test_mot20.py",
    "chars": 2442,
    "preview": "import json\nimport os\n\n\n\"\"\"\ncd datasets\nmkdir -p mix_mot20_ch/annotations\ncp MOT20/annotations/val_half.json mix_mot20_c"
  },
  {
    "path": "tools/mota.py",
    "chars": 3649,
    "preview": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport torc"
  },
  {
    "path": "tools/track.py",
    "chars": 11051,
    "preview": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport sys\n"
  },
  {
    "path": "tools/train.py",
    "chars": 3839,
    "preview": "from loguru import logger\nimport numpy as np\nnp.float = float\nnp.int = int\nnp.object = object\nnp.bool = bool\nimport torc"
  },
  {
    "path": "tools/txt2video.py",
    "chars": 7194,
    "preview": "import os\nimport sys\nimport json\nimport cv2\nimport glob as gb\nimport numpy as np\n\n\ndef colormap(rgb=False):\n    color_li"
  },
  {
    "path": "yolox/__init__.py",
    "chars": 126,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\nfrom .utils import configure_module\n\nconfigure_module()\n\n__version__ = \"0"
  },
  {
    "path": "yolox/core/__init__.py",
    "chars": 152,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .launch import laun"
  },
  {
    "path": "yolox/core/launch.py",
    "chars": 7038,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Code are based on\n# https://github.com/facebookresearch/detectron2/blob/"
  },
  {
    "path": "yolox/core/trainer.py",
    "chars": 11554,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom loguru import logge"
  },
  {
    "path": "yolox/data/__init__.py",
    "chars": 376,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .data_augment impor"
  },
  {
    "path": "yolox/data/data_augment.py",
    "chars": 14222,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\"\"\"\nData augmentation fun"
  },
  {
    "path": "yolox/data/data_prefetcher.py",
    "chars": 3232,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nimport torc"
  },
  {
    "path": "yolox/data/dataloading.py",
    "chars": 6231,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nfrom torch."
  },
  {
    "path": "yolox/data/samplers.py",
    "chars": 3356,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nimport torch\nimport torc"
  },
  {
    "path": "yolox/evaluators/__init__.py",
    "chars": 261,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom .coco_evaluator imp"
  },
  {
    "path": "yolox/evaluators/coco_evaluator.py",
    "chars": 7897,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) Megvii, Inc. and its affiliates.\n\nfrom loguru import logge"
  },
  {
    "path": "yolox/evaluators/diffusion_mot_evaluator.py",
    "chars": 13357,
    "preview": "from collections import defaultdict\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport torch\n\nfrom yolox.utils impo"
  },
  {
    "path": "yolox/evaluators/diffusion_mot_evaluator_kl.py",
    "chars": 11725,
    "preview": "from collections import defaultdict\nfrom loguru import logger\nfrom tqdm import tqdm\n\nimport torch\n\nfrom yolox.utils impo"
  },
  {
    "path": "yolox/evaluators/evaluation.py",
    "chars": 6562,
    "preview": "import os\nimport numpy as np\nimport copy\nimport motmetrics as mm\nmm.lap.default_solver = 'lap'\n\n\nclass Evaluator(object)"
  },
  {
    "path": "yolox/exp/__init__.py",
    "chars": 191,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .base_exp"
  },
  {
    "path": "yolox/exp/base_exp.py",
    "chars": 2013,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nf"
  },
  {
    "path": "yolox/exp/build.py",
    "chars": 1493,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport importl"
  },
  {
    "path": "yolox/exp/yolox_base.py",
    "chars": 8262,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\ni"
  },
  {
    "path": "yolox/layers/__init__.py",
    "chars": 151,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .fast_coc"
  },
  {
    "path": "yolox/layers/csrc/cocoeval/cocoeval.cpp",
    "chars": 20824,
    "preview": "// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n#include \"cocoeval.h\"\n#include <time.h>\n#include"
  },
  {
    "path": "yolox/layers/csrc/cocoeval/cocoeval.h",
    "chars": 3485,
    "preview": "// Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n#pragma once\n\n#include <pybind11/numpy.h>\n#inclu"
  },
  {
    "path": "yolox/layers/csrc/vision.cpp",
    "chars": 524,
    "preview": "#include \"cocoeval/cocoeval.h\"\n\nPYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {\n    m.def(\"COCOevalAccumulate\", &COCOeval::Ac"
  },
  {
    "path": "yolox/layers/fast_coco_eval_api.py",
    "chars": 5757,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# This file comes from\n# https://github.com/facebookresearch/detectron2/bl"
  },
  {
    "path": "yolox/models/__init__.py",
    "chars": 297,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .darknet "
  },
  {
    "path": "yolox/models/darknet.py",
    "chars": 6246,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom torch i"
  },
  {
    "path": "yolox/models/losses.py",
    "chars": 2900,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch"
  },
  {
    "path": "yolox/models/network_blocks.py",
    "chars": 6102,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch"
  },
  {
    "path": "yolox/models/yolo_fpn.py",
    "chars": 2486,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch"
  },
  {
    "path": "yolox/models/yolo_head.py",
    "chars": 24079,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom loguru im"
  },
  {
    "path": "yolox/models/yolo_pafpn.py",
    "chars": 3541,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch"
  },
  {
    "path": "yolox/models/yolox.py",
    "chars": 1373,
    "preview": "#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch"
  },
  {
    "path": "yolox/tracker/basetrack.py",
    "chars": 951,
    "preview": "import numpy as np\nfrom collections import OrderedDict\n\n\nclass TrackState(object):\n    New = 0\n    Tracked = 1\n    Lost "
  },
  {
    "path": "yolox/tracker/diffusion_tracker.py",
    "chars": 20967,
    "preview": "import numpy as np\nfrom collections import deque\n\nimport torch\nimport torch.nn.functional as F \nimport torchvision\nfrom "
  },
  {
    "path": "yolox/tracker/diffusion_tracker_kl.py",
    "chars": 23624,
    "preview": "import numpy as np\nfrom collections import deque\nimport time\nimport torch\nimport torch.nn.functional as F \nimport torchv"
  },
  {
    "path": "yolox/tracker/kalman_filter.py",
    "chars": 9547,
    "preview": "# vim: expandtab:ts=4:sw=4\nimport numpy as np\nimport scipy.linalg\n\n\n\"\"\"\nTable for the 0.95 quantile of the chi-square di"
  },
  {
    "path": "yolox/tracker/matching.py",
    "chars": 6196,
    "preview": "import cv2\nimport numpy as np\nimport scipy\nimport lap\nfrom scipy.spatial.distance import cdist\n\nfrom cython_bbox import "
  },
  {
    "path": "yolox/tracking_utils/evaluation.py",
    "chars": 4050,
    "preview": "import os\nimport numpy as np\nimport copy\nimport motmetrics as mm\nmm.lap.default_solver = 'lap'\n\nfrom yolox.tracking_util"
  },
  {
    "path": "yolox/tracking_utils/io.py",
    "chars": 3627,
    "preview": "import os\nfrom typing import Dict\nimport numpy as np\n\n\ndef write_results(filename, results_dict: Dict, data_type: str):\n"
  },
  {
    "path": "yolox/tracking_utils/timer.py",
    "chars": 958,
    "preview": "import time\n\n\nclass Timer(object):\n    \"\"\"A simple timer.\"\"\"\n    def __init__(self):\n        self.total_time = 0.\n      "
  },
  {
    "path": "yolox/utils/__init__.py",
    "chars": 450,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom .allreduc"
  },
  {
    "path": "yolox/utils/allreduce_norm.py",
    "chars": 2845,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\nf"
  },
  {
    "path": "yolox/utils/box_ops.py",
    "chars": 5263,
    "preview": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\"\"\"\nUtilities for bounding box manipulation and G"
  },
  {
    "path": "yolox/utils/boxes.py",
    "chars": 6752,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport numpy a"
  },
  {
    "path": "yolox/utils/checkpoint.py",
    "chars": 1331,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nfrom loguru imp"
  },
  {
    "path": "yolox/utils/cluster_nms.py",
    "chars": 4493,
    "preview": "import torch\n\n@torch.jit.script\ndef intersect(box_a, box_b):\n    \"\"\" We resize both tensors to [A,B,2] without new mallo"
  },
  {
    "path": "yolox/utils/demo_utils.py",
    "chars": 2806,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport numpy a"
  },
  {
    "path": "yolox/utils/dist.py",
    "chars": 7228,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# This file mainly comes from\n# https://github.com/facebookresearch/detect"
  },
  {
    "path": "yolox/utils/ema.py",
    "chars": 2533,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nimport torch\nim"
  },
  {
    "path": "yolox/utils/logger.py",
    "chars": 2751,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nfrom loguru im"
  },
  {
    "path": "yolox/utils/lr_scheduler.py",
    "chars": 6561,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport math\nfr"
  },
  {
    "path": "yolox/utils/metric.py",
    "chars": 3094,
    "preview": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\nimport numpy a"
  },
  {
    "path": "yolox/utils/model_utils.py",
    "chars": 3284,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport torch\ni"
  },
  {
    "path": "yolox/utils/setup_env.py",
    "chars": 1574,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport cv2\n\nim"
  },
  {
    "path": "yolox/utils/visualize.py",
    "chars": 4958,
    "preview": "#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.\n\nimport cv2\nimp"
  }
]

About this extraction

This page contains the full source code of the RainBowLuoCS/DiffusionTrack GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction covers 99 files (550.2 KB, approximately 144.2k tokens) plus a symbol index of 546 extracted functions, classes, methods, constants, and types, and can be used with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.
