master c4aa59eb5656 cached
445 files
2.6 MB
700.7k tokens
1435 symbols
1 requests
Download .txt
Showing preview only (2,794K chars total). Download the full file or copy to clipboard to get everything.
Repository: thuml/Transfer-Learning-Library
Branch: master
Commit: c4aa59eb5656
Files: 445
Total size: 2.6 MB

Directory structure:
gitextract_h0zbpfvz/

├── .github/
│   └── ISSUE_TEMPLATE/
│       ├── bug_report.md
│       ├── custom.md
│       └── feature_request.md
├── .gitignore
├── CONTRIBUTING.md
├── DATASETS.md
├── LICENSE
├── README.md
├── docs/
│   ├── Makefile
│   ├── conf.py
│   ├── index.rst
│   ├── make.bat
│   ├── requirements.txt
│   └── tllib/
│       ├── alignment/
│       │   ├── domain_adversarial.rst
│       │   ├── hypothesis_adversarial.rst
│       │   ├── index.rst
│       │   └── statistics_matching.rst
│       ├── modules.rst
│       ├── normalization.rst
│       ├── ranking.rst
│       ├── regularization.rst
│       ├── reweight.rst
│       ├── self_training.rst
│       ├── translation.rst
│       ├── utils/
│       │   ├── analysis.rst
│       │   ├── base.rst
│       │   ├── index.rst
│       │   └── metric.rst
│       └── vision/
│           ├── datasets.rst
│           ├── index.rst
│           ├── models.rst
│           └── transforms.rst
├── examples/
│   ├── domain_adaptation/
│   │   ├── image_classification/
│   │   │   ├── README.md
│   │   │   ├── adda.py
│   │   │   ├── adda.sh
│   │   │   ├── afn.py
│   │   │   ├── afn.sh
│   │   │   ├── bsp.py
│   │   │   ├── bsp.sh
│   │   │   ├── cc_loss.py
│   │   │   ├── cc_loss.sh
│   │   │   ├── cdan.py
│   │   │   ├── cdan.sh
│   │   │   ├── dan.py
│   │   │   ├── dan.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fixmatch.py
│   │   │   ├── fixmatch.sh
│   │   │   ├── jan.py
│   │   │   ├── jan.sh
│   │   │   ├── mcc.py
│   │   │   ├── mcc.sh
│   │   │   ├── mcd.py
│   │   │   ├── mcd.sh
│   │   │   ├── mdd.py
│   │   │   ├── mdd.sh
│   │   │   ├── requirements.txt
│   │   │   ├── self_ensemble.py
│   │   │   ├── self_ensemble.sh
│   │   │   └── utils.py
│   │   ├── image_regression/
│   │   │   ├── README.md
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── dd.py
│   │   │   ├── dd.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── rsd.py
│   │   │   ├── rsd.sh
│   │   │   └── utils.py
│   │   ├── keypoint_detection/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── regda.py
│   │   │   ├── regda.sh
│   │   │   ├── regda_fast.py
│   │   │   └── regda_fast.sh
│   │   ├── object_detection/
│   │   │   ├── README.md
│   │   │   ├── config/
│   │   │   │   ├── faster_rcnn_R_101_C4_cityscapes.yaml
│   │   │   │   ├── faster_rcnn_R_101_C4_voc.yaml
│   │   │   │   ├── faster_rcnn_vgg_16_cityscapes.yaml
│   │   │   │   └── retinanet_R_101_FPN_voc.yaml
│   │   │   ├── cycle_gan.py
│   │   │   ├── cycle_gan.sh
│   │   │   ├── d_adapt/
│   │   │   │   ├── README.md
│   │   │   │   ├── bbox_adaptation.py
│   │   │   │   ├── category_adaptation.py
│   │   │   │   ├── config/
│   │   │   │   │   ├── faster_rcnn_R_101_C4_cityscapes.yaml
│   │   │   │   │   ├── faster_rcnn_R_101_C4_voc.yaml
│   │   │   │   │   ├── faster_rcnn_vgg_16_cityscapes.yaml
│   │   │   │   │   └── retinanet_R_101_FPN_voc.yaml
│   │   │   │   ├── d_adapt.py
│   │   │   │   └── d_adapt.sh
│   │   │   ├── oracle.sh
│   │   │   ├── prepare_cityscapes_to_voc.py
│   │   │   ├── requirements.txt
│   │   │   ├── source_only.py
│   │   │   ├── source_only.sh
│   │   │   ├── utils.py
│   │   │   ├── visualize.py
│   │   │   └── visualize.sh
│   │   ├── openset_domain_adaptation/
│   │   │   ├── README.md
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── osbp.py
│   │   │   ├── osbp.sh
│   │   │   └── utils.py
│   │   ├── partial_domain_adaptation/
│   │   │   ├── README.md
│   │   │   ├── afn.py
│   │   │   ├── afn.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── iwan.py
│   │   │   ├── iwan.sh
│   │   │   ├── pada.py
│   │   │   ├── pada.sh
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── re_identification/
│   │   │   ├── README.md
│   │   │   ├── baseline.py
│   │   │   ├── baseline.sh
│   │   │   ├── baseline_cluster.py
│   │   │   ├── baseline_cluster.sh
│   │   │   ├── ibn.sh
│   │   │   ├── mmt.py
│   │   │   ├── mmt.sh
│   │   │   ├── requirements.txt
│   │   │   ├── spgan.py
│   │   │   ├── spgan.sh
│   │   │   └── utils.py
│   │   ├── semantic_segmentation/
│   │   │   ├── README.md
│   │   │   ├── advent.py
│   │   │   ├── advent.sh
│   │   │   ├── cycada.py
│   │   │   ├── cycada.sh
│   │   │   ├── cycle_gan.py
│   │   │   ├── cycle_gan.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fda.py
│   │   │   └── fda.sh
│   │   ├── wilds_image_classification/
│   │   │   ├── README.md
│   │   │   ├── cdan.py
│   │   │   ├── cdan.sh
│   │   │   ├── dan.py
│   │   │   ├── dan.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fixmatch.py
│   │   │   ├── fixmatch.sh
│   │   │   ├── jan.py
│   │   │   ├── jan.sh
│   │   │   ├── mdd.py
│   │   │   ├── mdd.sh
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── wilds_ogb_molpcba/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── gin.py
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── wilds_poverty/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── requirements.txt
│   │   │   ├── resnet_ms.py
│   │   │   └── utils.py
│   │   └── wilds_text/
│   │       ├── README.md
│   │       ├── erm.py
│   │       ├── erm.sh
│   │       ├── requirements.txt
│   │       └── utils.py
│   ├── domain_generalization/
│   │   ├── image_classification/
│   │   │   ├── README.md
│   │   │   ├── coral.py
│   │   │   ├── coral.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── groupdro.py
│   │   │   ├── groupdro.sh
│   │   │   ├── ibn.sh
│   │   │   ├── irm.py
│   │   │   ├── irm.sh
│   │   │   ├── mixstyle.py
│   │   │   ├── mixstyle.sh
│   │   │   ├── mldg.py
│   │   │   ├── mldg.sh
│   │   │   ├── requirements.txt
│   │   │   ├── utils.py
│   │   │   ├── vrex.py
│   │   │   └── vrex.sh
│   │   └── re_identification/
│   │       ├── README.md
│   │       ├── baseline.py
│   │       ├── baseline.sh
│   │       ├── ibn.sh
│   │       ├── mixstyle.py
│   │       ├── mixstyle.sh
│   │       ├── requirements.txt
│   │       └── utils.py
│   ├── model_selection/
│   │   ├── README.md
│   │   ├── hscore.py
│   │   ├── hscore.sh
│   │   ├── leep.py
│   │   ├── leep.sh
│   │   ├── logme.py
│   │   ├── logme.sh
│   │   ├── nce.py
│   │   ├── nce.sh
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── semi_supervised_learning/
│   │   └── image_classification/
│   │       ├── README.md
│   │       ├── convert_moco_to_pretrained.py
│   │       ├── debiasmatch.py
│   │       ├── debiasmatch.sh
│   │       ├── dst.py
│   │       ├── dst.sh
│   │       ├── erm.py
│   │       ├── erm.sh
│   │       ├── fixmatch.py
│   │       ├── fixmatch.sh
│   │       ├── flexmatch.py
│   │       ├── flexmatch.sh
│   │       ├── mean_teacher.py
│   │       ├── mean_teacher.sh
│   │       ├── noisy_student.py
│   │       ├── noisy_student.sh
│   │       ├── pi_model.py
│   │       ├── pi_model.sh
│   │       ├── pseudo_label.py
│   │       ├── pseudo_label.sh
│   │       ├── requirements.txt
│   │       ├── self_tuning.py
│   │       ├── self_tuning.sh
│   │       ├── uda.py
│   │       ├── uda.sh
│   │       └── utils.py
│   └── task_adaptation/
│       └── image_classification/
│           ├── README.md
│           ├── bi_tuning.py
│           ├── bi_tuning.sh
│           ├── bss.py
│           ├── bss.sh
│           ├── co_tuning.py
│           ├── co_tuning.sh
│           ├── convert_moco_to_pretrained.py
│           ├── delta.py
│           ├── delta.sh
│           ├── erm.py
│           ├── erm.sh
│           ├── lwf.py
│           ├── lwf.sh
│           ├── requirements.txt
│           ├── stochnorm.py
│           ├── stochnorm.sh
│           └── utils.py
├── requirements.txt
├── setup.py
└── tllib/
    ├── __init__.py
    ├── alignment/
    │   ├── __init__.py
    │   ├── adda.py
    │   ├── advent.py
    │   ├── bsp.py
    │   ├── cdan.py
    │   ├── coral.py
    │   ├── d_adapt/
    │   │   ├── __init__.py
    │   │   ├── feedback.py
    │   │   ├── modeling/
    │   │   │   ├── __init__.py
    │   │   │   ├── matcher.py
    │   │   │   ├── meta_arch/
    │   │   │   │   ├── __init__.py
    │   │   │   │   ├── rcnn.py
    │   │   │   │   └── retinanet.py
    │   │   │   └── roi_heads/
    │   │   │       ├── __init__.py
    │   │   │       ├── fast_rcnn.py
    │   │   │       └── roi_heads.py
    │   │   └── proposal.py
    │   ├── dan.py
    │   ├── dann.py
    │   ├── jan.py
    │   ├── mcd.py
    │   ├── mdd.py
    │   ├── osbp.py
    │   ├── regda.py
    │   └── rsd.py
    ├── modules/
    │   ├── __init__.py
    │   ├── classifier.py
    │   ├── domain_discriminator.py
    │   ├── entropy.py
    │   ├── gl.py
    │   ├── grl.py
    │   ├── kernels.py
    │   ├── loss.py
    │   └── regressor.py
    ├── normalization/
    │   ├── __init__.py
    │   ├── afn.py
    │   ├── ibn.py
    │   ├── mixstyle/
    │   │   ├── __init__.py
    │   │   ├── resnet.py
    │   │   └── sampler.py
    │   └── stochnorm.py
    ├── ranking/
    │   ├── __init__.py
    │   ├── hscore.py
    │   ├── leep.py
    │   ├── logme.py
    │   ├── nce.py
    │   └── transrate.py
    ├── regularization/
    │   ├── __init__.py
    │   ├── bi_tuning.py
    │   ├── bss.py
    │   ├── co_tuning.py
    │   ├── delta.py
    │   ├── knowledge_distillation.py
    │   └── lwf.py
    ├── reweight/
    │   ├── __init__.py
    │   ├── groupdro.py
    │   ├── iwan.py
    │   └── pada.py
    ├── self_training/
    │   ├── __init__.py
    │   ├── cc_loss.py
    │   ├── dst.py
    │   ├── flexmatch.py
    │   ├── mcc.py
    │   ├── mean_teacher.py
    │   ├── pi_model.py
    │   ├── pseudo_label.py
    │   ├── self_ensemble.py
    │   ├── self_tuning.py
    │   └── uda.py
    ├── translation/
    │   ├── __init__.py
    │   ├── cycada.py
    │   ├── cyclegan/
    │   │   ├── __init__.py
    │   │   ├── discriminator.py
    │   │   ├── generator.py
    │   │   ├── loss.py
    │   │   ├── transform.py
    │   │   └── util.py
    │   ├── fourier_transform.py
    │   └── spgan/
    │       ├── __init__.py
    │       ├── loss.py
    │       └── siamese.py
    ├── utils/
    │   ├── __init__.py
    │   ├── analysis/
    │   │   ├── __init__.py
    │   │   ├── a_distance.py
    │   │   └── tsne.py
    │   ├── data.py
    │   ├── logger.py
    │   ├── meter.py
    │   ├── metric/
    │   │   ├── __init__.py
    │   │   ├── keypoint_detection.py
    │   │   └── reid.py
    │   └── scheduler.py
    └── vision/
        ├── __init__.py
        ├── datasets/
        │   ├── __init__.py
        │   ├── _util.py
        │   ├── aircrafts.py
        │   ├── caltech101.py
        │   ├── cifar.py
        │   ├── coco70.py
        │   ├── cub200.py
        │   ├── digits.py
        │   ├── domainnet.py
        │   ├── dtd.py
        │   ├── eurosat.py
        │   ├── food101.py
        │   ├── imagelist.py
        │   ├── imagenet_r.py
        │   ├── imagenet_sketch.py
        │   ├── keypoint_detection/
        │   │   ├── __init__.py
        │   │   ├── freihand.py
        │   │   ├── hand_3d_studio.py
        │   │   ├── human36m.py
        │   │   ├── keypoint_dataset.py
        │   │   ├── lsp.py
        │   │   ├── rendered_hand_pose.py
        │   │   ├── surreal.py
        │   │   └── util.py
        │   ├── object_detection/
        │   │   └── __init__.py
        │   ├── office31.py
        │   ├── officecaltech.py
        │   ├── officehome.py
        │   ├── openset/
        │   │   └── __init__.py
        │   ├── oxfordflowers.py
        │   ├── oxfordpets.py
        │   ├── pacs.py
        │   ├── partial/
        │   │   ├── __init__.py
        │   │   ├── caltech_imagenet.py
        │   │   └── imagenet_caltech.py
        │   ├── patchcamelyon.py
        │   ├── regression/
        │   │   ├── __init__.py
        │   │   ├── dsprites.py
        │   │   ├── image_regression.py
        │   │   └── mpi3d.py
        │   ├── reid/
        │   │   ├── __init__.py
        │   │   ├── basedataset.py
        │   │   ├── convert.py
        │   │   ├── dukemtmc.py
        │   │   ├── market1501.py
        │   │   ├── msmt17.py
        │   │   ├── personx.py
        │   │   └── unreal.py
        │   ├── resisc45.py
        │   ├── retinopathy.py
        │   ├── segmentation/
        │   │   ├── __init__.py
        │   │   ├── cityscapes.py
        │   │   ├── gta5.py
        │   │   ├── segmentation_list.py
        │   │   └── synthia.py
        │   ├── stanford_cars.py
        │   ├── stanford_dogs.py
        │   ├── sun397.py
        │   └── visda2017.py
        ├── models/
        │   ├── __init__.py
        │   ├── digits.py
        │   ├── keypoint_detection/
        │   │   ├── __init__.py
        │   │   ├── loss.py
        │   │   └── pose_resnet.py
        │   ├── object_detection/
        │   │   ├── __init__.py
        │   │   ├── backbone/
        │   │   │   ├── __init__.py
        │   │   │   ├── mmdetection/
        │   │   │   │   ├── vgg.py
        │   │   │   │   └── weight_init.py
        │   │   │   └── vgg.py
        │   │   ├── meta_arch/
        │   │   │   ├── __init__.py
        │   │   │   ├── rcnn.py
        │   │   │   └── retinanet.py
        │   │   ├── proposal_generator/
        │   │   │   ├── __init__.py
        │   │   │   └── rpn.py
        │   │   └── roi_heads/
        │   │       ├── __init__.py
        │   │       └── roi_heads.py
        │   ├── reid/
        │   │   ├── __init__.py
        │   │   ├── identifier.py
        │   │   ├── loss.py
        │   │   └── resnet.py
        │   ├── resnet.py
        │   └── segmentation/
        │       ├── __init__.py
        │       └── deeplabv2.py
        └── transforms/
            ├── __init__.py
            ├── keypoint_detection.py
            └── segmentation.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .github/ISSUE_TEMPLATE/bug_report.md
================================================
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

**Describe the bug**
A clear and concise description of what the bug is.

**To Reproduce**
Steps to reproduce the behavior:
1. Go to '...'
2. Click on '....'
3. Scroll down to '....'
4. See error

**Expected behavior**
A clear and concise description of what you expected to happen.

**Screenshots**
If applicable, add screenshots to help explain your problem.

**Desktop (please complete the following information):**
 - OS: [e.g. iOS]
 - Browser [e.g. chrome, safari]
 - Version [e.g. 22]

**Smartphone (please complete the following information):**
 - Device: [e.g. iPhone6]
 - OS: [e.g. iOS8.1]
 - Browser [e.g. stock browser, safari]
 - Version [e.g. 22]

**Additional context**
Add any other context about the problem here.


================================================
FILE: .github/ISSUE_TEMPLATE/custom.md
================================================
---
name: Custom issue template
about: Describe this issue template's purpose here.
title: ''
labels: ''
assignees: ''

---




================================================
FILE: .github/ISSUE_TEMPLATE/feature_request.md
================================================
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''

---

**Is your feature request related to a problem? Please describe.**
A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]

**Describe the solution you'd like**
A clear and concise description of what you want to happen.

**Describe alternatives you've considered**
A clear and concise description of any alternative solutions or features you've considered.

**Additional context**
Add any other context or screenshots about the feature request here.


================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/
docs/build/*
docs/pytorch_sphinx_theme/*

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# pipenv
#   According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
#   However, in case of collaboration, if having platform-specific dependencies or dependencies
#   having no cross-platform support, pipenv may install dependencies that don't work, or not
#   install all needed dependencies.
#Pipfile.lock

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

.idea/

exp/*
trash/*
examples/domain_adaptation/digits/logs/*
examples/domain_adaptation/digits/data/*
.DS_Store
*/.DS_Store


================================================
FILE: CONTRIBUTING.md
================================================
## Contributing to Transfer-Learning-Library

All kinds of contributions are welcome, including but not limited to the following.

- Fix typo or bugs
- Add documentation
- Add new features and components

### Workflow

1. fork and pull the latest Transfer-Learning-Library repository
2. checkout a new branch (do not use master branch for PRs)
3. commit your changes
4. create a PR

```{note}
If you plan to add some new features that involve large changes, it is encouraged to open an issue for discussion first.
```


================================================
FILE: DATASETS.md
================================================
## Notice (2023-08-01)

### Transfer-Learning-Library Dataset Link Failure Issue
Dear users,

We regret to inform you that the dataset links of Transfer-Learning-Library have become invalid due to a cloud storage failure, and we sincerely apologize that many users have recently been unable to download the datasets properly.

We are working diligently to resolve this issue and plan to restore the links as soon as possible. Currently, we have already restored some dataset links, and they have been updated on the master branch. You can obtain the latest version by running "git pull."

As the version on PyPI has not been updated yet, please temporarily uninstall the old version by running "pip uninstall tllib" before installing the new one.

In the future, we are planning to store the datasets on both Baidu Cloud and Google Cloud to provide more stable download links.

Additionally, a small portion of datasets that were backed up on our local server have also been lost due to a hard disk failure. For these datasets, we need to re-download and verify them, which might take longer to restore the links.

Within this week, we will release the updated dataset and confirm the list of datasets without backup. For datasets without backup, if you have previously downloaded them locally, please contact us via email. Your support is highly appreciated.

Once again, we apologize for any inconvenience caused and thank you for your understanding.

Sincerely,

The Transfer-Learning-Library Team

## Update (2023-08-09)

Most of the dataset links have been restored at present. The confirmed datasets without backups are as follows:

- Classification
  - COCO70
  - EuroSAT
  - PACS
  - PatchCamelyon
  - [Partial Domain Adaptation]
    - CaltechImageNet

- Keypoint Detection
  - Hand3DStudio
  - LSP
  - SURREAL

- Object Detection
  - Comic

- Re-Identification
  - PersonX
  - UnrealPerson

**For these datasets, if you had previously downloaded them locally, please contact us via email. We greatly appreciate everyone's support.**

## Notice (2023-08-01)

### Transfer-Learning-Library数据集链接失效问题

各位使用者,我们很抱歉通知大家,最近Transfer-Learning-Library的数据集链接因为云盘故障而失效,导致很多使用者无法正常下载数据集。

我们正在全力以赴解决这一问题,并计划在最短的时间内恢复链接。目前我们已经恢复了部分数据集链接,更新在master分支上,您可以通过git pull来获取最新的版本。

由于pypi上的版本还未更新,暂时请首先通过pip uninstall tllib卸载旧版本。

日后我们计划将数据集存储在百度云和谷歌云上,提供更加稳定的下载链接。

另外,小部分数据集在我们本地服务器上的备份也由于硬盘故障而丢失,对于这些数据集我们需要重新下载并验证,可能需要更长的时间来恢复链接。

我们会在本周内发布已经更新的数据集和确认无备份的数据集列表,对于无备份的数据集,如果您之前有下载到本地,请通过邮件联系我们,非常感谢大家的支持。

再次向您表达我们的歉意,并感谢您的理解。

Transfer-Learning-Library团队

## Update (2023-08-09)

目前大部分数据集的链接已经恢复,确认无备份的数据集如下:

- Classification
  - COCO70
  - EuroSAT
  - PACS
  - PatchCamelyon
  - [Partial Domain Adaptation]
    - CaltechImageNet

- Keypoint Detection
  - Hand3DStudio
  - LSP
  - SURREAL

- Object Detection
  - Comic

- Re-Identification
  - PersonX
  - UnrealPerson

**对于这些数据集,如果您之前有下载到本地,请通过邮件联系我们,非常感谢大家的支持。**


================================================
FILE: LICENSE
================================================
Copyright (c) 2018 The Python Packaging Authority

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

================================================
FILE: README.md
================================================
<div align='center' margin-bottom:40px> <img src="logo.png" width=200/> </div>

# Transfer Learning Library

- [Introduction](#introduction)
- [Updates](#updates)
- [Supported Methods](#supported-methods)
- [Installation](#installation)
- [Documentation](#documentation)
- [Contact](#contact)
- [Citation](#citation)

## Update (2024-03-15)

We upload an offline version of documentation [here](/docs/html.zip). You can download and unzip it to view the documentation.

## Notice (2023-08-09)

A note on broken dataset links can be found here: [DATASETS.md](DATASETS.md).

## Introduction
*TLlib* is an open-source and well-documented library for Transfer Learning. It is based on pure PyTorch with high performance and friendly API. Our code is pythonic, and the design is consistent with torchvision. You can easily develop new algorithms, or readily apply existing algorithms.

Our _API_ is divided by methods, which include: 
- domain alignment methods (tllib.alignment)
- domain translation methods (tllib.translation)
- self-training methods (tllib.self_training)
- regularization methods (tllib.regularization)
- data reweighting/resampling methods (tllib.reweight)
- model ranking/selection methods (tllib.ranking)
- normalization-based methods (tllib.normalization)

<img src="Tllib.png">

We provide many example codes in the directory _examples_, which is divided by learning setups. Currently, the supported learning setups include:
- DA (domain adaptation)
- TA (task adaptation, also known as finetune)
- OOD (out-of-distribution generalization, also known as DG / domain generalization)
- SSL (semi-supervised learning)
- Model Selection 

Our supported tasks include: classification, regression, object detection, segmentation, keypoint detection, and so on.

## Updates 

### 2022.9

We support installing *TLlib* via `pip`, which is experimental currently.

```shell
pip install -i https://test.pypi.org/simple/ tllib==0.4
```

### 2022.8
We release `v0.4` of *TLlib*. Previous versions of *TLlib* can be found [here](https://github.com/thuml/Transfer-Learning-Library/releases). In `v0.4`, we add implementations of 
the following methods:
- Domain Adaptation for Object Detection [[Code]](/examples/domain_adaptation/object_detection) [[API]](/tllib/alignment/d_adapt)
- Pre-trained Model Selection [[Code]](/examples/model_selection) [[API]](/tllib/ranking)
- Semi-supervised Learning for Classification [[Code]](/examples/semi_supervised_learning/image_classification/) [[API]](/tllib/self_training)

Besides, we maintain a collection of **_awesome papers in Transfer Learning_** in another repo [_A Roadmap for Transfer Learning_](https://github.com/thuml/A-Roadmap-for-Transfer-Learning).

### 2022.2
We adjusted our API following our survey [Transferability in Deep Learning](https://arxiv.org/abs/2201.05867).

## Supported Methods
The currently supported algorithms include:

##### Domain Adaptation for Classification [[Code]](/examples/domain_adaptation/image_classification)
- **DANN** - Unsupervised Domain Adaptation by Backpropagation [[ICML 2015]](http://proceedings.mlr.press/v37/ganin15.pdf) [[Code]](/examples/domain_adaptation/image_classification/dann.py)
- **DAN** - Learning Transferable Features with Deep Adaptation Networks [[ICML 2015]](http://ise.thss.tsinghua.edu.cn/~mlong/doc/deep-adaptation-networks-icml15.pdf) [[Code]](/examples/domain_adaptation/image_classification/dan.py)
- **JAN** - Deep Transfer Learning with Joint Adaptation Networks [[ICML 2017]](http://ise.thss.tsinghua.edu.cn/~mlong/doc/joint-adaptation-networks-icml17.pdf) [[Code]](/examples/domain_adaptation/image_classification/jan.py)
- **ADDA** - Adversarial Discriminative Domain Adaptation [[CVPR 2017]](http://openaccess.thecvf.com/content_cvpr_2017/papers/Tzeng_Adversarial_Discriminative_Domain_CVPR_2017_paper.pdf) [[Code]](/examples/domain_adaptation/image_classification/adda.py)
- **CDAN** - Conditional Adversarial Domain Adaptation [[NIPS 2018]](http://papers.nips.cc/paper/7436-conditional-adversarial-domain-adaptation) [[Code]](/examples/domain_adaptation/image_classification/cdan.py) 
- **MCD** - Maximum Classifier Discrepancy for Unsupervised Domain Adaptation [[CVPR 2018]](http://openaccess.thecvf.com/content_cvpr_2018/papers/Saito_Maximum_Classifier_Discrepancy_CVPR_2018_paper.pdf) [[Code]](/examples/domain_adaptation/image_classification/mcd.py)
- **MDD** - Bridging Theory and Algorithm for Domain Adaptation [[ICML 2019]](http://proceedings.mlr.press/v97/zhang19i/zhang19i.pdf) [[Code]](/examples/domain_adaptation/image_classification/mdd.py) 
- **BSP** - Transferability vs. Discriminability: Batch Spectral Penalization for Adversarial Domain Adaptation [[ICML 2019]](http://proceedings.mlr.press/v97/chen19i/chen19i.pdf) [[Code]](/examples/domain_adaptation/image_classification/bsp.py) 
- **MCC** - Minimum Class Confusion for Versatile Domain Adaptation [[ECCV 2020]](http://www.ecva.net/papers/eccv_2020/papers_ECCV/papers/123660460.pdf) [[Code]](/examples/domain_adaptation/image_classification/mcc.py)

##### Domain Adaptation for Object Detection [[Code]](/examples/domain_adaptation/object_detection)
- **CycleGAN** - Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks [[ICCV 2017]](https://openaccess.thecvf.com/content_ICCV_2017/papers/Zhu_Unpaired_Image-To-Image_Translation_ICCV_2017_paper.pdf) [[Code]](/examples/domain_adaptation/object_detection/cycle_gan.py)
- **D-adapt** - Decoupled Adaptation for Cross-Domain Object Detection [[ICLR 2022]](https://openreview.net/pdf?id=VNqaB1g9393) [[Code]](/examples/domain_adaptation/object_detection/d_adapt)

##### Domain Adaptation for Semantic Segmentation [[Code]](/examples/domain_adaptation/semantic_segmentation/)
- **CycleGAN** - Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks [[ICCV 2017]](https://openaccess.thecvf.com/content_ICCV_2017/papers/Zhu_Unpaired_Image-To-Image_Translation_ICCV_2017_paper.pdf) [[Code]](/examples/domain_adaptation/semantic_segmentation/cycle_gan.py)
- **CyCADA** - Cycle-Consistent Adversarial Domain Adaptation [[ICML 2018]](http://proceedings.mlr.press/v80/hoffman18a.html) [[Code]](/examples/domain_adaptation/semantic_segmentation/cycada.py)
- **ADVENT** - Adversarial Entropy Minimization for Domain Adaptation in Semantic Segmentation [[CVPR 2019]](http://openaccess.thecvf.com/content_CVPR_2019/papers/Vu_ADVENT_Adversarial_Entropy_Minimization_for_Domain_Adaptation_in_Semantic_Segmentation_CVPR_2019_paper.pdf) [[Code]](/examples/domain_adaptation/semantic_segmentation/advent.py)
- **FDA** - Fourier Domain Adaptation for Semantic Segmentation [[CVPR 2020]](https://arxiv.org/abs/2004.05498) [[Code]](/examples/domain_adaptation/semantic_segmentation/fda.py)

##### Domain Adaptation for Keypoint Detection [[Code]](/examples/domain_adaptation/keypoint_detection)
- **RegDA** - Regressive Domain Adaptation for Unsupervised Keypoint Detection [[CVPR 2021]](http://ise.thss.tsinghua.edu.cn/~mlong/doc/regressive-domain-adaptation-cvpr21.pdf) [[Code]](/examples/domain_adaptation/keypoint_detection)

##### Domain Adaptation for Person Re-identification [[Code]](/examples/domain_adaptation/re_identification/)
- **IBN-Net** - Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net [[ECCV 2018]](https://openaccess.thecvf.com/content_ECCV_2018/papers/Xingang_Pan_Two_at_Once_ECCV_2018_paper.pdf)
- **MMT** - Mutual Mean-Teaching: Pseudo Label Refinery for Unsupervised Domain Adaptation on Person Re-identification [[ICLR 2020]](https://arxiv.org/abs/2001.01526) [[Code]](/examples/domain_adaptation/re_identification/mmt.py)
- **SPGAN** - Similarity Preserving Generative Adversarial Network [[CVPR 2018]](https://arxiv.org/pdf/1811.10551.pdf) [[Code]](/examples/domain_adaptation/re_identification/spgan.py)

##### Partial Domain Adaptation [[Code]](/examples/domain_adaptation/partial_domain_adaptation)
- **IWAN** - Importance Weighted Adversarial Nets for Partial Domain Adaptation [[CVPR 2018]](https://arxiv.org/abs/1803.09210) [[Code]](/examples/domain_adaptation/partial_domain_adaptation/iwan.py)
- **AFN** - Larger Norm More Transferable: An Adaptive Feature Norm Approach for
Unsupervised Domain Adaptation [[ICCV 2019]](https://arxiv.org/pdf/1811.07456v2.pdf) [[Code]](/examples/domain_adaptation/partial_domain_adaptation/afn.py)

##### Open-set Domain Adaptation [[Code]](/examples/domain_adaptation/openset_domain_adaptation)
- **OSBP** - Open Set Domain Adaptation by Backpropagation [[ECCV 2018]](https://arxiv.org/abs/1804.10427) [[Code]](/examples/domain_adaptation/openset_domain_adaptation/osbp.py)

##### Domain Generalization for Classification [[Code]](/examples/domain_generalization/image_classification/)
- **IBN-Net** - Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net [[ECCV 2018]](https://openaccess.thecvf.com/content_ECCV_2018/papers/Xingang_Pan_Two_at_Once_ECCV_2018_paper.pdf)
- **MixStyle** - Domain Generalization with MixStyle [[ICLR 2021]](https://arxiv.org/abs/2104.02008) [[Code]](/examples/domain_generalization/image_classification/mixstyle.py)
- **MLDG** - Learning to Generalize: Meta-Learning for Domain Generalization [[AAAI 2018]](https://arxiv.org/pdf/1710.03463.pdf) [[Code]](/examples/domain_generalization/image_classification/mldg.py)
- **IRM** - Invariant Risk Minimization [[ArXiv]](https://arxiv.org/abs/1907.02893) [[Code]](/examples/domain_generalization/image_classification/irm.py)
- **VREx** - Out-of-Distribution Generalization via Risk Extrapolation [[ICML 2021]](https://arxiv.org/abs/2003.00688) [[Code]](/examples/domain_generalization/image_classification/vrex.py)
- **GroupDRO** - Distributionally Robust Neural Networks for Group Shifts: On the Importance of Regularization for Worst-Case Generalization [[ArXiv]](https://arxiv.org/abs/1911.08731) [[Code]](/examples/domain_generalization/image_classification/groupdro.py)
- **Deep CORAL** - Correlation Alignment for Deep Domain Adaptation [[ECCV 2016]](https://arxiv.org/abs/1607.01719) [[Code]](/examples/domain_generalization/image_classification/coral.py)

##### Domain Generalization for Person Re-identification [[Code]](/examples/domain_generalization/re_identification/)
- **IBN-Net** - Two at Once: Enhancing Learning and Generalization Capacities via IBN-Net [[ECCV 2018]](https://openaccess.thecvf.com/content_ECCV_2018/papers/Xingang_Pan_Two_at_Once_ECCV_2018_paper.pdf)
- **MixStyle** - Domain Generalization with MixStyle [[ICLR 2021]](https://arxiv.org/abs/2104.02008) [[Code]](/examples/domain_generalization/re_identification/mixstyle.py)

##### Task Adaptation (Fine-Tuning) for Image Classification [[Code]](/examples/task_adaptation/image_classification/)
- **L2-SP** - Explicit inductive bias for transfer learning with convolutional networks [[ICML 2018]](https://arxiv.org/abs/1802.01483) [[Code]](/examples/task_adaptation/image_classification/delta.py)
- **BSS** - Catastrophic Forgetting Meets Negative Transfer: Batch Spectral Shrinkage for Safe Transfer Learning [[NIPS 2019]](https://proceedings.neurips.cc/paper/2019/file/c6bff625bdb0393992c9d4db0c6bbe45-Paper.pdf) [[Code]](/examples/task_adaptation/image_classification/bss.py)
- **DELTA** - DEep Learning Transfer using Feature Map with Attention for convolutional networks [[ICLR 2019]](https://openreview.net/pdf?id=rkgbwsAcYm) [[Code]](/examples/task_adaptation/image_classification/delta.py)
- **Co-Tuning** - Co-Tuning for Transfer Learning [[NIPS 2020]](http://ise.thss.tsinghua.edu.cn/~mlong/doc/co-tuning-for-transfer-learning-nips20.pdf) [[Code]](/examples/task_adaptation/image_classification/co_tuning.py)
- **StochNorm** - Stochastic Normalization [[NIPS 2020]](https://papers.nips.cc/paper/2020/file/bc573864331a9e42e4511de6f678aa83-Paper.pdf) [[Code]](/examples/task_adaptation/image_classification/stochnorm.py)
- **LWF** - Learning Without Forgetting [[ECCV 2016]](https://arxiv.org/abs/1606.09282) [[Code]](/examples/task_adaptation/image_classification/lwf.py)
- **Bi-Tuning** - Bi-tuning of Pre-trained Representations [[ArXiv]](https://arxiv.org/abs/2011.06182) [[Code]](/examples/task_adaptation/image_classification/bi_tuning.py)

##### Pre-trained Model Selection [[Code]](/examples/model_selection)

- **H-Score** - An Information-theoretic Approach to Transferability in Task Transfer Learning [[ICIP 2019]](http://yangli-feasibility.com/home/media/icip-19.pdf) [[Code]](/examples/model_selection/hscore.py)
- **NCE** - Negative Conditional Entropy in "Transferability and Hardness of Supervised Classification Tasks" [[ICCV 2019]](https://arxiv.org/pdf/1908.08142v1.pdf) [[Code]](/examples/model_selection/nce.py)
- **LEEP** - LEEP: A New Measure to Evaluate Transferability of Learned Representations [[ICML 2020]](http://proceedings.mlr.press/v119/nguyen20b/nguyen20b.pdf) [[Code]](/examples/model_selection/leep.py)
- **LogME** - Log Maximum Evidence in "LogME: Practical Assessment of Pre-trained Models for Transfer Learning" [[ICML 2021]](https://arxiv.org/pdf/2102.11005.pdf) [[Code]](/examples/model_selection/logme.py)

##### Semi-Supervised Learning for Classification [[Code]](/examples/semi_supervised_learning/image_classification/)
- **Pseudo Label** - Pseudo-Label : The Simple and Efficient Semi-Supervised Learning Method for Deep Neural Networks [[ICML 2013]](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.664.3543&rep=rep1&type=pdf) [[Code]](/examples/semi_supervised_learning/image_classification/pseudo_label.py)
- **Pi Model** - Temporal Ensembling for Semi-Supervised Learning [[ICLR 2017]](https://arxiv.org/abs/1610.02242) [[Code]](/examples/semi_supervised_learning/image_classification/pi_model.py)
- **Mean Teacher** - Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results [[NIPS 2017]](https://arxiv.org/abs/1703.01780) [[Code]](/examples/semi_supervised_learning/image_classification/mean_teacher.py)
- **Noisy Student** - Self-Training With Noisy Student Improves ImageNet Classification [[CVPR 2020]](https://openaccess.thecvf.com/content_CVPR_2020/papers/Xie_Self-Training_With_Noisy_Student_Improves_ImageNet_Classification_CVPR_2020_paper.pdf) [[Code]](/examples/semi_supervised_learning/image_classification/noisy_student.py)
- **UDA** - Unsupervised Data Augmentation for Consistency Training [[NIPS 2020]](https://arxiv.org/pdf/1904.12848v4.pdf) [[Code]](/examples/semi_supervised_learning/image_classification/uda.py)
- **FixMatch** - Simplifying Semi-Supervised Learning with Consistency and Confidence [[NIPS 2020]](https://arxiv.org/abs/2001.07685) [[Code]](/examples/semi_supervised_learning/image_classification/fixmatch.py)
- **Self-Tuning** - Self-Tuning for Data-Efficient Deep Learning [[ICML 2021]](http://ise.thss.tsinghua.edu.cn/~mlong/doc/Self-Tuning-for-Data-Efficient-Deep-Learning-icml21.pdf) [[Code]](/examples/semi_supervised_learning/image_classification/self_tuning.py)
- **FlexMatch** - FlexMatch: Boosting Semi-Supervised Learning with Curriculum Pseudo Labeling [[NIPS 2021]](https://arxiv.org/abs/2110.08263) [[Code]](/examples/semi_supervised_learning/image_classification/flexmatch.py)
- **DebiasMatch** - Debiased Learning From Naturally Imbalanced Pseudo-Labels [[CVPR 2022]](https://openaccess.thecvf.com/content/CVPR2022/papers/Wang_Debiased_Learning_From_Naturally_Imbalanced_Pseudo-Labels_CVPR_2022_paper.pdf) [[Code]](/examples/semi_supervised_learning/image_classification/debiasmatch.py)
- **DST** - Debiased Self-Training for Semi-Supervised Learning [[NIPS 2022 Oral]](https://arxiv.org/abs/2202.07136) [[Code]](/examples/semi_supervised_learning/image_classification/dst.py)

## Installation

##### Install from Source Code

- Please git clone the library first. Then, run the following commands to install `tllib` and all the dependencies.
```shell
python setup.py install
pip install -r requirements.txt
```
##### Install via `pip`

- Installing via `pip` is currently experimental.

```shell
pip install -i https://test.pypi.org/simple/ tllib==0.4
```


## Documentation
You can find the API documentation on the website: [Documentation](http://tl.thuml.ai/).

## Usage
You can find examples in the directory `examples`. A typical usage is 
```shell script
# Train a DANN on Office-31 Amazon -> Webcam task using ResNet 50.
# Assume you have put the datasets under the path `data/office31`,
# or you are glad to download the datasets automatically from the Internet to this path
python dann.py data/office31 -d Office31 -s A -t W -a resnet50  --epochs 20
```

## Contributing
We appreciate all contributions. If you are planning to contribute back bug-fixes, please do so without any further discussion. If you plan to contribute new features, utility functions or extensions, please first open an issue and discuss the feature with us. 

## Disclaimer on Datasets

This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have licenses to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license.

If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community!


## Contact
If you have any problem with our code or have some suggestions, including future feature requests, feel free to contact
- Baixu Chen (cbx_99_hasta@outlook.com)
- Junguang Jiang (JiangJunguang1123@outlook.com)
- Mingsheng Long (longmingsheng@gmail.com)

or describe it in Issues.

For Q&A in Chinese, you can choose to ask questions here before sending an email. [迁移学习算法库答疑专区](https://zhuanlan.zhihu.com/p/248104070)

## Citation

If you use this toolbox or benchmark in your research, please cite this project. 

```latex
@misc{jiang2022transferability,
      title={Transferability in Deep Learning: A Survey}, 
      author={Junguang Jiang and Yang Shu and Jianmin Wang and Mingsheng Long},
      year={2022},
      eprint={2201.05867},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}

@misc{tllib,
    author = {Junguang Jiang and Baixu Chen and Bo Fu and Mingsheng Long},
    title = {Transfer-Learning-Library},
    year = {2020},
    publisher = {GitHub},
    journal = {GitHub repository},
    howpublished = {\url{https://github.com/thuml/Transfer-Learning-Library}},
}
```

## Acknowledgment

We would like to thank School of Software, Tsinghua University and The National Engineering Laboratory for Big Data Software for providing such an excellent ML research platform.



================================================
FILE: docs/Makefile
================================================
# Minimal makefile for Sphinx documentation
#

# You can set these variables from the command line.
# SPHINXOPTS: extra options forwarded to sphinx-build (empty by default).
SPHINXOPTS    =
SPHINXBUILD   = sphinx-build
SPHINXPROJ    = PyTorchSphinxTheme
# SOURCEDIR is the docs root (this directory); BUILDDIR receives the output.
SOURCEDIR     = .
BUILDDIR      = build

# Put it first so that "make" without argument is like "make help".
help:
	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)

.PHONY: help Makefile

# Catch-all target: route all unknown targets to Sphinx using the new
# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
# e.g. `make html` becomes `sphinx-build -M html . build`.
%: Makefile
	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)


================================================
FILE: docs/conf.py
================================================
"""Sphinx configuration for the Transfer Learning Library documentation.

Declares the extensions (autodoc, autosummary, napoleon, intersphinx, ...),
project metadata, and output options (HTML theme, LaTeX, man pages, Texinfo)
used when building the docs in this directory.
"""
import sys
import os

# Extend sys.path so Sphinx/autodoc can import modules from the repository
# root ('..' relative to docs/) and from ./demo/.
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('./demo/'))

# NOTE(review): `__version__` below comes from the pytorch_sphinx_theme
# package, not from tllib itself -- it is used for `version`/`release`
# further down; confirm this is intentional.
from pytorch_sphinx_theme import __version__
import pytorch_sphinx_theme

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.intersphinx',
    'sphinx.ext.autodoc',
    'sphinx.ext.viewcode',
    'sphinxcontrib.httpdomain',
    'sphinx.ext.autosummary',
    'sphinx.ext.autosectionlabel',
    'sphinx.ext.napoleon',
]

# build the templated autosummary files
autosummary_generate = True
numpydoc_show_class_members = False

# autosectionlabel throws warnings if section names are duplicated.
# The following tells autosectionlabel to not throw a warning for
# duplicated section names that are in different documents.
autosectionlabel_prefix_document = True


# Parse Google/NumPy-style docstrings with `ivar` fields for attributes.
napoleon_use_ivar = True

# Do not warn about external images (status badges in README.rst)
suppress_warnings = ['image.nonlocal_uri']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Transfer Learning Library'
copyright = u'THUML Group'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = __version__
# The full version, including alpha/beta/rc tags.
release = __version__

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
language = 'en'

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# External documentation sets that cross-references may resolve against
# (via sphinx.ext.intersphinx, enabled above).
intersphinx_mapping = {
    'rtd': ('https://docs.readthedocs.io/en/latest/', None),
    'python': ('https://docs.python.org/3', None),
    'numpy': ('https://numpy.org/doc/stable', None),
    'torch': ('https://pytorch.org/docs/stable', None),
    'torchvision': ('https://pytorch.org/vision/stable', None),
    'PIL': ('https://pillow.readthedocs.io/en/stable/', None)
}


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'pytorch_sphinx_theme'
html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'canonical_url': '',
    'analytics_id': '',
    'logo_only': False,
    'display_version': False,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,

    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': False,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False
}


# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/images/TransLearn.png"

# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None


# Disable displaying type annotations, these can be very verbose
autodoc_typehints = 'none'

# Output file base name for HTML help builder.
htmlhelp_basename = 'TransferLearningLibrary'


# -- Options for LaTeX output --------------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'TransferLearningLibrary.tex', u'Transfer Learning Library Documentation',
   u'THUML', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output --------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'Transfer Learning Library', u'Transfer Learning Library Documentation',
     [u'THUML'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output ------------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'Transfer Learning Library', u'Transfer Learning Library Documentation',
   u'THUML', 'Transfer Learning Library',
   'One line description of project.', 'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

================================================
FILE: docs/index.rst
================================================
=====================================
Transfer Learning
=====================================

.. toctree::
    :maxdepth: 2
    :caption: Transfer Learning API
    :titlesonly:

    tllib/modules
    tllib/alignment/index
    tllib/translation
    tllib/self_training
    tllib/reweight
    tllib/normalization
    tllib/regularization
    tllib/ranking


.. toctree::
    :maxdepth: 2
    :caption: Common API
    :titlesonly:

    tllib/vision/index
    tllib/utils/index


================================================
FILE: docs/make.bat
================================================
@ECHO OFF

REM Run from the directory containing this script so relative paths resolve.
pushd %~dp0

REM Command file for Sphinx documentation

REM Fall back to the module form when SPHINXBUILD is not set in the environment.
if "%SPHINXBUILD%" == "" (
	set SPHINXBUILD=python -msphinx
)
set SPHINXOPTS=
REM NOTE(review): this unconditionally overwrites SPHINXBUILD, discarding both
REM the fallback above and any value supplied via the environment -- confirm
REM this is intentional.
set SPHINXBUILD=sphinx-build
set SOURCEDIR=.
set BUILDDIR=build
set SPHINXPROJ=PyTorchSphinxTheme

REM With no build target argument, just show Sphinx's help.
if "%1" == "" goto help

REM Probe that the sphinx-build executable is available; exit code 9009 means
REM Windows could not find the command.
%SPHINXBUILD% >NUL 2>NUL
if errorlevel 9009 (
	echo.
	echo.The Sphinx module was not found. Make sure you have Sphinx installed,
	echo.then set the SPHINXBUILD environment variable to point to the full
	echo.path of the 'sphinx-build' executable. Alternatively you may add the
	echo.Sphinx directory to PATH.
	echo.
	echo.If you don't have Sphinx installed, grab it from
	echo.http://sphinx-doc.org/
	exit /b 1
)

REM Delegate the requested target (e.g. "html") to Sphinx's make mode.
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
goto end

:help
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%

:end
popd


================================================
FILE: docs/requirements.txt
================================================
sphinxcontrib-httpdomain
sphinx


================================================
FILE: docs/tllib/alignment/domain_adversarial.rst
================================================
==========================================
Domain Adversarial Training
==========================================


.. _DANN:

DANN: Domain Adversarial Neural Network
----------------------------------------

.. autoclass:: tllib.alignment.dann.DomainAdversarialLoss

.. _CDAN:

CDAN: Conditional Domain Adversarial Network
-----------------------------------------------

.. autoclass:: tllib.alignment.cdan.ConditionalDomainAdversarialLoss


.. autoclass:: tllib.alignment.cdan.RandomizedMultiLinearMap


.. autoclass:: tllib.alignment.cdan.MultiLinearMap


.. _ADDA:

ADDA: Adversarial Discriminative Domain Adaptation
-----------------------------------------------------

.. autoclass:: tllib.alignment.adda.DomainAdversarialLoss

.. note::
    ADDAgrl is also implemented and benchmarked. You can find code
    `here <https://github.com/thuml/Transfer-Learning-Library/blob/master/examples/domain_adaptation/image_classification/addagrl.py>`_.


.. _BSP:

BSP: Batch Spectral Penalization
-----------------------------------

.. autoclass:: tllib.alignment.bsp.BatchSpectralPenalizationLoss


.. _OSBP:

OSBP: Open Set Domain Adaptation by Backpropagation
----------------------------------------------------

.. autoclass:: tllib.alignment.osbp.UnknownClassBinaryCrossEntropy


.. _ADVENT:

ADVENT: Adversarial Entropy Minimization for Semantic Segmentation
------------------------------------------------------------------

.. autoclass:: tllib.alignment.advent.Discriminator

.. autoclass:: tllib.alignment.advent.DomainAdversarialEntropyLoss
    :members:


.. _DADAPT:

D-adapt: Decoupled Adaptation for Cross-Domain Object Detection
----------------------------------------------------------------
`Original Paper <https://openreview.net/pdf?id=VNqaB1g9393>`_.

.. autoclass:: tllib.alignment.d_adapt.proposal.Proposal

.. autoclass:: tllib.alignment.d_adapt.proposal.PersistentProposalList

.. autoclass:: tllib.alignment.d_adapt.proposal.ProposalDataset

.. autoclass:: tllib.alignment.d_adapt.modeling.meta_arch.DecoupledGeneralizedRCNN

.. autoclass:: tllib.alignment.d_adapt.modeling.meta_arch.DecoupledRetinaNet



================================================
FILE: docs/tllib/alignment/hypothesis_adversarial.rst
================================================
==========================================
Hypothesis Adversarial Learning
==========================================



.. _MCD:

MCD: Maximum Classifier Discrepancy
--------------------------------------------

.. autofunction:: tllib.alignment.mcd.classifier_discrepancy

.. autofunction:: tllib.alignment.mcd.entropy

.. autoclass:: tllib.alignment.mcd.ImageClassifierHead


.. _MDD:


MDD: Margin Disparity Discrepancy
--------------------------------------------


.. autoclass:: tllib.alignment.mdd.MarginDisparityDiscrepancy


**MDD for Classification**


.. autoclass:: tllib.alignment.mdd.ClassificationMarginDisparityDiscrepancy


.. autoclass:: tllib.alignment.mdd.ImageClassifier
    :members:

.. autofunction:: tllib.alignment.mdd.shift_log


**MDD for Regression**

.. autoclass:: tllib.alignment.mdd.RegressionMarginDisparityDiscrepancy

.. autoclass:: tllib.alignment.mdd.ImageRegressor


.. _RegDA:

RegDA: Regressive Domain Adaptation
--------------------------------------------

.. autoclass:: tllib.alignment.regda.PseudoLabelGenerator2d

.. autoclass:: tllib.alignment.regda.RegressionDisparity

.. autoclass:: tllib.alignment.regda.PoseResNet2d


================================================
FILE: docs/tllib/alignment/index.rst
================================================
=====================================
Feature Alignment
=====================================

.. toctree::
    :maxdepth: 3
    :caption: Feature Alignment
    :titlesonly:

    statistics_matching
    domain_adversarial
    hypothesis_adversarial


================================================
FILE: docs/tllib/alignment/statistics_matching.rst
================================================
=====================
Statistics Matching
=====================


.. _DAN:

DAN: Deep Adaptation Network
-----------------------------

.. autoclass:: tllib.alignment.dan.MultipleKernelMaximumMeanDiscrepancy


.. _CORAL:

Deep CORAL: Correlation Alignment for Deep Domain Adaptation
--------------------------------------------------------------

.. autoclass:: tllib.alignment.coral.CorrelationAlignmentLoss


.. _JAN:

JAN: Joint Adaptation Network
------------------------------

.. autoclass:: tllib.alignment.jan.JointMultipleKernelMaximumMeanDiscrepancy



================================================
FILE: docs/tllib/modules.rst
================================================
=====================
Modules
=====================


Classifier
-------------------------------
.. autoclass:: tllib.modules.classifier.Classifier
    :members:

Regressor
-------------------------------
.. autoclass:: tllib.modules.regressor.Regressor
    :members:

Domain Discriminator
-------------------------------
.. autoclass:: tllib.modules.domain_discriminator.DomainDiscriminator
    :members:

GRL: Gradient Reverse Layer
-----------------------------
.. autoclass:: tllib.modules.grl.WarmStartGradientReverseLayer
    :members:

Gaussian Kernels
------------------------
.. autoclass:: tllib.modules.kernels.GaussianKernel


Entropy
------------------------
.. autofunction:: tllib.modules.entropy.entropy


Knowledge Distillation Loss
-------------------------------
.. autoclass:: tllib.modules.loss.KnowledgeDistillationLoss
    :members:




================================================
FILE: docs/tllib/normalization.rst
================================================
=====================
Normalization
=====================



.. _AFN:

AFN: Adaptive Feature Norm
-----------------------------

.. autoclass:: tllib.normalization.afn.AdaptiveFeatureNorm

.. autoclass:: tllib.normalization.afn.Block

.. autoclass:: tllib.normalization.afn.ImageClassifier


StochNorm: Stochastic Normalization
------------------------------------------

.. autoclass:: tllib.normalization.stochnorm.StochNorm1d

.. autoclass:: tllib.normalization.stochnorm.StochNorm2d

.. autoclass:: tllib.normalization.stochnorm.StochNorm3d

.. autofunction:: tllib.normalization.stochnorm.convert_model


.. _IBN:

IBN-Net: Instance-Batch Normalization Network
------------------------------------------------

.. autoclass:: tllib.normalization.ibn.InstanceBatchNorm2d

.. autoclass:: tllib.normalization.ibn.IBNNet
    :members:

.. automodule:: tllib.normalization.ibn
   :members:


.. _MIXSTYLE:

MixStyle: Domain Generalization with MixStyle
-------------------------------------------------

.. autoclass:: tllib.normalization.mixstyle.MixStyle

.. note::
    MixStyle is only activated during `training` stage, with some probability :math:`p`.

.. automodule:: tllib.normalization.mixstyle.resnet
    :members:


================================================
FILE: docs/tllib/ranking.rst
================================================
=====================
Ranking
=====================



.. _H_score:

H-score
-------------------------------------------

.. autofunction:: tllib.ranking.hscore.h_score


.. _LEEP:

LEEP: Log Expected Empirical Prediction
-------------------------------------------

.. autofunction:: tllib.ranking.leep.log_expected_empirical_prediction


.. _NCE:

NCE: Negative Conditional Entropy
-------------------------------------------

.. autofunction:: tllib.ranking.nce.negative_conditional_entropy


.. _LogME:

LogME: Log Maximum Evidence
-------------------------------------------

.. autofunction:: tllib.ranking.logme.log_maximum_evidence



================================================
FILE: docs/tllib/regularization.rst
================================================
===========================================
Regularization
===========================================

.. _L2:

L2
------

.. autoclass:: tllib.regularization.delta.L2Regularization


.. _L2SP:

L2-SP
------

.. autoclass:: tllib.regularization.delta.SPRegularization


.. _DELTA:

DELTA: DEep Learning Transfer using Feature Map with Attention
-------------------------------------------------------------------------------------

.. autoclass:: tllib.regularization.delta.BehavioralRegularization

.. autoclass:: tllib.regularization.delta.AttentionBehavioralRegularization

.. autoclass:: tllib.regularization.delta.IntermediateLayerGetter


.. _LWF:

LWF: Learning without Forgetting
------------------------------------------

.. autoclass:: tllib.regularization.lwf.Classifier



.. _CoTuning:

Co-Tuning
------------------------------------------

.. autoclass:: tllib.regularization.co_tuning.CoTuningLoss

.. autoclass:: tllib.regularization.co_tuning.Relationship


.. _StochNorm:


.. _BiTuning:

Bi-Tuning
------------------------------------------

.. autoclass:: tllib.regularization.bi_tuning.BiTuning


.. _BSS:

BSS: Batch Spectral Shrinkage
------------------------------------------

.. autoclass:: tllib.regularization.bss.BatchSpectralShrinkage



================================================
FILE: docs/tllib/reweight.rst
================================================
=======================================
Re-weighting
=======================================


.. _PADA:

PADA: Partial Adversarial Domain Adaptation
---------------------------------------------

.. autoclass:: tllib.reweight.pada.ClassWeightModule

.. autoclass:: tllib.reweight.pada.AutomaticUpdateClassWeightModule
    :members:

.. autofunction::  tllib.reweight.pada.collect_classification_results


.. _IWAN:

IWAN: Importance Weighted Adversarial Nets
---------------------------------------------

.. autoclass:: tllib.reweight.iwan.ImportanceWeightModule
    :members:



.. _GroupDRO:

GroupDRO: Group Distributionally robust optimization
------------------------------------------------------

.. autoclass:: tllib.reweight.groupdro.AutomaticUpdateDomainWeightModule
    :members:


================================================
FILE: docs/tllib/self_training.rst
================================================
=======================================
Self Training Methods
=======================================


.. _PseudoLabel:

Pseudo Label
-----------------------------

.. autoclass:: tllib.self_training.pseudo_label.ConfidenceBasedSelfTrainingLoss

.. _PiModel:

:math:`\Pi` Model
-----------------------------

.. autoclass:: tllib.self_training.pi_model.ConsistencyLoss


.. autoclass:: tllib.self_training.pi_model.L2ConsistencyLoss


.. _MeanTeacher:

Mean Teacher
-----------------------------

.. autoclass:: tllib.self_training.mean_teacher.EMATeacher


.. _SelfEnsemble:

Self Ensemble
-----------------------------

.. autoclass:: tllib.self_training.self_ensemble.ClassBalanceLoss


.. _UDA:

UDA
-----------------------------

.. autoclass:: tllib.self_training.uda.StrongWeakConsistencyLoss


.. _MCC:

MCC: Minimum Class Confusion
-----------------------------

.. autoclass:: tllib.self_training.mcc.MinimumClassConfusionLoss


.. _MMT:

MMT: Mutual Mean-Teaching
--------------------------
`Mutual Mean-Teaching: Pseudo Label Refinery for Unsupervised
Domain Adaptation on Person Re-identification (ICLR 2020) <https://arxiv.org/pdf/2001.01526.pdf>`_

State of the art unsupervised domain adaptation methods utilize clustering algorithms to generate pseudo labels on target
domain, which are noisy and thus harmful for training. Inspired by the teacher-student approaches, MMT framework
provides robust soft pseudo labels in an on-line peer-teaching manner.

We denote two networks as :math:`f_1,f_2`, their parameters as :math:`\theta_1,\theta_2`. The authors also
propose to use the temporally average model of each network :math:`\text{ensemble}(f_1),\text{ensemble}(f_2)` to generate more reliable
soft pseudo labels for supervising the other network. Specifically, the parameters of the temporally
average models of the two networks at current iteration :math:`T` are denoted as :math:`E^{(T)}[\theta_1]` and
:math:`E^{(T)}[\theta_2]` respectively, which can be calculated as

.. math::
    E^{(T)}[\theta_1] = \alpha E^{(T-1)}[\theta_1] + (1-\alpha)\theta_1
.. math::
    E^{(T)}[\theta_2] = \alpha E^{(T-1)}[\theta_2] + (1-\alpha)\theta_2

where :math:`E^{(T-1)}[\theta_1],E^{(T-1)}[\theta_2]` indicate the temporal average parameters of the two networks in
the previous iteration :math:`(T-1)`, the initial temporal average parameters are
:math:`E^{(0)}[\theta_1]=\theta_1,E^{(0)}[\theta_2]=\theta_2` and :math:`\alpha` is the momentum.

These two networks cooperate with each other in three ways:

- When running the clustering algorithm, we average features produced by :math:`\text{ensemble}(f_1)` and
  :math:`\text{ensemble}(f_2)` instead of only considering one of them.
- A **soft triplet loss** is optimized between :math:`f_1` and :math:`\text{ensemble}(f_2)` and vice versa
  to force one network to learn from the temporal average of the other network.
- A **cross entropy loss** is optimized between :math:`f_1` and :math:`\text{ensemble}(f_2)` and vice versa
  to force one network to learn from the temporal average of the other network.

The above mentioned loss functions are listed below, more details can be found in training scripts.

.. autoclass:: tllib.vision.models.reid.loss.SoftTripletLoss

.. autoclass:: tllib.vision.models.reid.loss.CrossEntropyLoss


.. _SelfTuning:

Self Tuning
-----------------------------

.. autoclass:: tllib.self_training.self_tuning.Classifier

.. autoclass:: tllib.self_training.self_tuning.SelfTuning


.. _FlexMatch:

FlexMatch
-----------------------------

.. autoclass:: tllib.self_training.flexmatch.DynamicThresholdingModule
    :members:

.. _DST:

Debiased Self-Training
-----------------------------

.. autoclass:: tllib.self_training.dst.ImageClassifier

.. autoclass:: tllib.self_training.dst.WorstCaseEstimationLoss


================================================
FILE: docs/tllib/translation.rst
================================================
=======================================
Domain Translation
=======================================


.. _CycleGAN:

------------------------------------------------
CycleGAN: Cycle-Consistent Adversarial Networks
------------------------------------------------

Discriminator
--------------

.. autofunction:: tllib.translation.cyclegan.pixel

.. autofunction:: tllib.translation.cyclegan.patch

Generator
--------------

.. autofunction:: tllib.translation.cyclegan.resnet_9

.. autofunction:: tllib.translation.cyclegan.resnet_6

.. autofunction:: tllib.translation.cyclegan.unet_256

.. autofunction:: tllib.translation.cyclegan.unet_128


GAN Loss
--------------

.. autoclass:: tllib.translation.cyclegan.LeastSquaresGenerativeAdversarialLoss

.. autoclass:: tllib.translation.cyclegan.VanillaGenerativeAdversarialLoss

.. autoclass:: tllib.translation.cyclegan.WassersteinGenerativeAdversarialLoss

Translation
--------------

.. autoclass:: tllib.translation.cyclegan.Translation


Util
----------------

.. autoclass:: tllib.translation.cyclegan.util.ImagePool
    :members:

.. autofunction:: tllib.translation.cyclegan.util.set_requires_grad




.. _Cycada:

--------------------------------------------------------------
CyCADA: Cycle-Consistent Adversarial Domain Adaptation
--------------------------------------------------------------

.. autoclass:: tllib.translation.cycada.SemanticConsistency



.. _SPGAN:

-----------------------------------------------------------
SPGAN: Similarity Preserving Generative Adversarial Network
-----------------------------------------------------------
`Image-Image Domain Adaptation with Preserved Self-Similarity and Domain-Dissimilarity for Person Re-identification
<https://arxiv.org/pdf/1711.07027.pdf>`_. SPGAN is based on CycleGAN. An additional Siamese network is adopted to force
the generator to produce images different from identities in target dataset.

Siamese Network
-------------------

.. autoclass:: tllib.translation.spgan.siamese.SiameseNetwork

Contrastive Loss
-------------------

.. autoclass:: tllib.translation.spgan.loss.ContrastiveLoss


.. _FDA:

------------------------------------------------
FDA: Fourier Domain Adaptation
------------------------------------------------

.. autoclass:: tllib.translation.fourier_transform.FourierTransform

.. autofunction:: tllib.translation.fourier_transform.low_freq_mutate







================================================
FILE: docs/tllib/utils/analysis.rst
================================================
==============
Analysis Tools
==============


.. autofunction:: tllib.utils.analysis.collect_feature


.. autofunction:: tllib.utils.analysis.a_distance.calculate


.. autofunction:: tllib.utils.analysis.tsne.visualize



================================================
FILE: docs/tllib/utils/base.rst
================================================
Generic Tools
==============


Average Meter
---------------------------------

.. autoclass:: tllib.utils.meter.AverageMeter
   :members:

Progress Meter
---------------------------------

.. autoclass:: tllib.utils.meter.ProgressMeter
   :members:

Meter
---------------------------------

.. autoclass:: tllib.utils.meter.Meter
   :members:

Data
---------------------------------

.. autoclass:: tllib.utils.data.ForeverDataIterator
   :members:

.. autoclass:: tllib.utils.data.CombineDataset
   :members:

.. autofunction:: tllib.utils.data.send_to_device

.. autofunction:: tllib.utils.data.concatenate

Logger
-----------

.. autoclass:: tllib.utils.logger.TextLogger
   :members:


.. autoclass:: tllib.utils.logger.CompleteLogger
   :members:



================================================
FILE: docs/tllib/utils/index.rst
================================================
=====================================
Utilities
=====================================

.. toctree::
    :maxdepth: 2
    :caption: Utilities
    :titlesonly:

    base
    metric
    analysis

================================================
FILE: docs/tllib/utils/metric.rst
================================================
===========
Metrics
===========

Classification & Segmentation
==============================


Accuracy
---------------------------------

.. autofunction:: tllib.utils.metric.accuracy


ConfusionMatrix
---------------------------------

.. autoclass:: tllib.utils.metric.ConfusionMatrix
   :members:


================================================
FILE: docs/tllib/vision/datasets.rst
================================================
Datasets
=============================

Cross-Domain Classification
---------------------------------------------------------


--------------------------------------
ImageList
--------------------------------------

.. autoclass:: tllib.vision.datasets.imagelist.ImageList
   :members:

-------------------------------------
Office-31
-------------------------------------

.. autoclass:: tllib.vision.datasets.office31.Office31
   :members:
   :inherited-members:

---------------------------------------
Office-Caltech
---------------------------------------

.. autoclass:: tllib.vision.datasets.officecaltech.OfficeCaltech
   :members:
   :inherited-members:

---------------------------------------
Office-Home
---------------------------------------

.. autoclass:: tllib.vision.datasets.officehome.OfficeHome
   :members:
   :inherited-members:

--------------------------------------
VisDA-2017
--------------------------------------

.. autoclass:: tllib.vision.datasets.visda2017.VisDA2017
   :members:
   :inherited-members:

--------------------------------------
DomainNet
--------------------------------------

.. autoclass:: tllib.vision.datasets.domainnet.DomainNet
   :members:
   :inherited-members:

--------------------------------------
PACS
--------------------------------------

.. autoclass:: tllib.vision.datasets.pacs.PACS
   :members:


--------------------------------------
MNIST
--------------------------------------

.. autoclass:: tllib.vision.datasets.digits.MNIST
   :members:


--------------------------------------
USPS
--------------------------------------

.. autoclass:: tllib.vision.datasets.digits.USPS
   :members:


--------------------------------------
SVHN
--------------------------------------

.. autoclass:: tllib.vision.datasets.digits.SVHN
   :members:


Partial Cross-Domain Classification
----------------------------------------------------

---------------------------------------
Partial Wrapper
---------------------------------------

.. autofunction:: tllib.vision.datasets.partial.partial

.. autofunction:: tllib.vision.datasets.partial.default_partial


---------------------------------------
Caltech-256->ImageNet-1k
---------------------------------------

.. autoclass:: tllib.vision.datasets.partial.caltech_imagenet.CaltechImageNet
   :members:


---------------------------------------
ImageNet-1k->Caltech-256
---------------------------------------

.. autoclass:: tllib.vision.datasets.partial.imagenet_caltech.ImageNetCaltech
   :members:


Open Set Cross-Domain Classification
------------------------------------------------------

---------------------------------------
Open Set Wrapper
---------------------------------------

.. autofunction:: tllib.vision.datasets.openset.open_set

.. autofunction:: tllib.vision.datasets.openset.default_open_set


Cross-Domain Regression
------------------------------------------------------

---------------------------------------
ImageRegression
---------------------------------------

.. autoclass:: tllib.vision.datasets.regression.image_regression.ImageRegression
   :members:

---------------------------------------
DSprites
---------------------------------------
.. autoclass:: tllib.vision.datasets.regression.dsprites.DSprites
   :members:

---------------------------------------
MPI3D
---------------------------------------
.. autoclass:: tllib.vision.datasets.regression.mpi3d.MPI3D
   :members:


Cross-Domain Segmentation
-----------------------------------------------

---------------------------------------
SegmentationList
---------------------------------------
.. autoclass:: tllib.vision.datasets.segmentation.segmentation_list.SegmentationList
   :members:

---------------------------------------
Cityscapes
---------------------------------------
.. autoclass:: tllib.vision.datasets.segmentation.cityscapes.Cityscapes

---------------------------------------
GTA5
---------------------------------------
.. autoclass:: tllib.vision.datasets.segmentation.gta5.GTA5

---------------------------------------
Synthia
---------------------------------------
.. autoclass:: tllib.vision.datasets.segmentation.synthia.Synthia


---------------------------------------
Foggy Cityscapes
---------------------------------------
.. autoclass:: tllib.vision.datasets.segmentation.cityscapes.FoggyCityscapes


Cross-Domain Keypoint Detection
-----------------------------------------------

---------------------------------------
Dataset Base for Keypoint Detection
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.keypoint_dataset.KeypointDataset
   :members:

.. autoclass:: tllib.vision.datasets.keypoint_detection.keypoint_dataset.Body16KeypointDataset
   :members:

.. autoclass:: tllib.vision.datasets.keypoint_detection.keypoint_dataset.Hand21KeypointDataset
   :members:

---------------------------------------
Rendered Handpose Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.rendered_hand_pose.RenderedHandPose
   :members:

---------------------------------------
Hand-3d-Studio Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.hand_3d_studio.Hand3DStudio
   :members:

---------------------------------------
FreiHAND Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.freihand.FreiHand
   :members:

---------------------------------------
Surreal Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.surreal.SURREAL
   :members:

---------------------------------------
LSP Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.lsp.LSP
   :members:

---------------------------------------
Human3.6M Dataset
---------------------------------------
.. autoclass:: tllib.vision.datasets.keypoint_detection.human36m.Human36M
   :members:

Cross-Domain ReID
------------------------------------------------------

---------------------------------------
Market1501
---------------------------------------

.. autoclass:: tllib.vision.datasets.reid.market1501.Market1501
   :members:

---------------------------------------
DukeMTMC-reID
---------------------------------------

.. autoclass:: tllib.vision.datasets.reid.dukemtmc.DukeMTMC
   :members:

---------------------------------------
MSMT17
---------------------------------------

.. autoclass:: tllib.vision.datasets.reid.msmt17.MSMT17
   :members:


Natural Object Recognition
---------------------------------------------------------


-------------------------------------
Stanford Dogs
-------------------------------------

.. autoclass:: tllib.vision.datasets.stanford_dogs.StanfordDogs
   :members:

-------------------------------------
Stanford Cars
-------------------------------------

.. autoclass:: tllib.vision.datasets.stanford_cars.StanfordCars
   :members:

-------------------------------------
CUB-200-2011
-------------------------------------

.. autoclass:: tllib.vision.datasets.cub200.CUB200
   :members:

-------------------------------------
FGVC Aircraft
-------------------------------------

.. autoclass:: tllib.vision.datasets.aircrafts.Aircraft
   :members:

-------------------------------------
Oxford-IIIT Pets
-------------------------------------

.. autoclass:: tllib.vision.datasets.oxfordpets.OxfordIIITPets
   :members:

-------------------------------------
COCO-70
-------------------------------------

.. autoclass:: tllib.vision.datasets.coco70.COCO70
   :members:

-------------------------------------
DTD
-------------------------------------

.. autoclass:: tllib.vision.datasets.dtd.DTD
   :members:

-------------------------------------
OxfordFlowers102
-------------------------------------

.. autoclass:: tllib.vision.datasets.oxfordflowers.OxfordFlowers102
   :members:

-------------------------------------
Caltech101
-------------------------------------

.. autoclass:: tllib.vision.datasets.caltech101.Caltech101
   :members:


Specialized Image Classification
--------------------------------

-------------------------------------
PatchCamelyon
-------------------------------------

.. autoclass:: tllib.vision.datasets.patchcamelyon.PatchCamelyon
   :members:

-------------------------------------
Retinopathy
-------------------------------------

.. autoclass:: tllib.vision.datasets.retinopathy.Retinopathy
   :members:

-------------------------------------
EuroSAT
-------------------------------------

.. autoclass:: tllib.vision.datasets.eurosat.EuroSAT
   :members:

-------------------------------------
Resisc45
-------------------------------------

.. autoclass:: tllib.vision.datasets.resisc45.Resisc45
   :members:

-------------------------------------
Food-101
-------------------------------------

.. autoclass:: tllib.vision.datasets.food101.Food101
   :members:

-------------------------------------
SUN397
-------------------------------------

.. autoclass:: tllib.vision.datasets.sun397.SUN397
   :members:


================================================
FILE: docs/tllib/vision/index.rst
================================================
=====================================
Vision
=====================================

.. toctree::
    :maxdepth: 2
    :caption: Vision
    :titlesonly:

    datasets
    models
    transforms

================================================
FILE: docs/tllib/vision/models.rst
================================================
Models
===========================

------------------------------
Image Classification
------------------------------

ResNets
---------------------------------

.. automodule:: tllib.vision.models.resnet
   :members:

LeNet
--------------------------

.. automodule:: tllib.vision.models.digits.lenet
   :members:

DTN
--------------------------

.. automodule:: tllib.vision.models.digits.dtn
   :members:

----------------------------------
Object Detection
----------------------------------

.. autoclass:: tllib.vision.models.object_detection.meta_arch.TLGeneralizedRCNN
   :members:

.. autoclass:: tllib.vision.models.object_detection.meta_arch.TLRetinaNet
   :members:

.. autoclass:: tllib.vision.models.object_detection.proposal_generator.rpn.TLRPN

.. autoclass:: tllib.vision.models.object_detection.roi_heads.TLRes5ROIHeads
    :members:

.. autoclass:: tllib.vision.models.object_detection.roi_heads.TLStandardROIHeads
    :members:

----------------------------------
Semantic Segmentation
----------------------------------

.. autofunction:: tllib.vision.models.segmentation.deeplabv2.deeplabv2_resnet101


----------------------------------
Keypoint Detection
----------------------------------

PoseResNet
--------------------------

.. autofunction:: tllib.vision.models.keypoint_detection.pose_resnet.pose_resnet101

.. autoclass:: tllib.vision.models.keypoint_detection.pose_resnet.PoseResNet

.. autoclass:: tllib.vision.models.keypoint_detection.pose_resnet.Upsampling


Joint Loss
----------------------------------

.. autoclass:: tllib.vision.models.keypoint_detection.loss.JointsMSELoss

.. autoclass:: tllib.vision.models.keypoint_detection.loss.JointsKLLoss


-----------------------------------
Re-Identification
-----------------------------------

Models
---------------
.. autoclass:: tllib.vision.models.reid.resnet.ReidResNet

.. automodule:: tllib.vision.models.reid.resnet
    :members:

.. autoclass:: tllib.vision.models.reid.identifier.ReIdentifier
    :members:

Loss
-----------------------------------
.. autoclass:: tllib.vision.models.reid.loss.TripletLoss

Sampler
-----------------------------------
.. autoclass:: tllib.utils.data.RandomMultipleGallerySampler


================================================
FILE: docs/tllib/vision/transforms.rst
================================================
Transforms
=============================


Classification
---------------------------------

.. automodule:: tllib.vision.transforms
   :members:


Segmentation
---------------------------------


.. automodule:: tllib.vision.transforms.segmentation
   :members:


Keypoint Detection
---------------------------------


.. automodule:: tllib.vision.transforms.keypoint_detection
   :members:


================================================
FILE: examples/domain_adaptation/image_classification/README.md
================================================
# Unsupervised Domain Adaptation for Image Classification

## Installation

It’s suggested to use **pytorch==1.7.1** and torchvision==0.8.2 in order to reproduce the benchmark results.

Example scripts support all models in [PyTorch-Image-Models](https://github.com/rwightman/pytorch-image-models). You
also need to install timm to use PyTorch-Image-Models.

```
pip install timm
```

## Dataset

The following datasets can be downloaded automatically:

- [MNIST](http://yann.lecun.com/exdb/mnist/), [SVHN](http://ufldl.stanford.edu/housenumbers/)
  , [USPS](https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps)
- [Office31](https://www.cc.gatech.edu/~judy/domainadapt/)
- [OfficeCaltech](https://www.cc.gatech.edu/~judy/domainadapt/)
- [OfficeHome](https://www.hemanthdv.org/officeHomeDataset.html)
- [VisDA2017](http://ai.bu.edu/visda-2017/)
- [DomainNet](http://ai.bu.edu/M3SDA/)

You need to prepare the following datasets manually if you want to use them:

- [ImageNet](https://www.image-net.org/)
- [ImageNetR](https://github.com/hendrycks/imagenet-r)
- [ImageNet-Sketch](https://github.com/HaohanWang/ImageNet-Sketch)

and prepare them following the [documentation for ImageNetR](/tllib/vision/datasets/imagenet_r.py)
and [ImageNet-Sketch](/tllib/vision/datasets/imagenet_sketch.py).

## Supported Methods

Supported methods include:

- [Domain Adversarial Neural Network (DANN)](https://arxiv.org/abs/1505.07818)
- [Deep Adaptation Network (DAN)](https://arxiv.org/pdf/1502.02791)
- [Joint Adaptation Network (JAN)](https://arxiv.org/abs/1605.06636)
- [Adversarial Discriminative Domain Adaptation (ADDA)](https://arxiv.org/pdf/1702.05464.pdf)
- [Conditional Domain Adversarial Network (CDAN)](https://arxiv.org/abs/1705.10667)
- [Maximum Classifier Discrepancy (MCD)](https://arxiv.org/abs/1712.02560)
- [Adaptive Feature Norm (AFN)](https://arxiv.org/pdf/1811.07456v2.pdf)
- [Batch Spectral Penalization (BSP)](http://ise.thss.tsinghua.edu.cn/~mlong/doc/batch-spectral-penalization-icml19.pdf)
- [Margin Disparity Discrepancy (MDD)](https://arxiv.org/abs/1904.05801)
- [Minimum Class Confusion (MCC)](https://arxiv.org/abs/1912.03699)
- [FixMatch](https://arxiv.org/abs/2001.07685)

## Usage

The shell files provide the scripts to reproduce the benchmark results with the specified hyper-parameters. For example, if you want to
train DANN on Office31, use the following script

```shell script
# Train a DANN on Office-31 Amazon -> Webcam task using ResNet 50.
# Assume you have put the datasets under the path `data/office-31`, 
# or you are glad to download the datasets automatically from the Internet to this path
CUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/dann/Office31_A2W
```

Note that ``-s`` specifies the source domain, ``-t`` specifies the target domain, and ``--log`` specifies where to store
results.

After running the above command, it will download ``Office-31`` datasets from the Internet if it's the first time you
run the code. Directory that stores datasets will be named as
``examples/domain_adaptation/image_classification/data/<dataset name>``.

If everything works fine, you will see results in the following format:

    Epoch: [1][ 900/1000]	Time  0.60 ( 0.69)	Data  0.22 ( 0.31)	Loss   0.74 (  0.85)	Cls Acc 96.9 (95.1)	Domain Acc 64.1 (62.6)

You can also watch these results in the log file ``logs/dann/Office31_A2W/log.txt``.

After training, you can test your algorithm's performance by passing in ``--phase test``.

```
CUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/dann/Office31_A2W --phase test
```

## Experiment and Results

**Notations**

- ``Origin`` means the accuracy reported by the original paper.
- ``Avg`` is the accuracy reported by `TLlib`.
- ``ERM`` refers to the model trained with data from the source domain.
- ``Oracle`` refers to the model trained with data from the target domain.

We found that the accuracies of adversarial methods (including DANN, ADDA, CDAN, MCD, BSP and MDD) are not stable even
after the random seed is fixed, thus we repeat running adversarial methods on *Office-31* and *VisDA-2017*
three times and report their average accuracy.

### Office-31 accuracy on ResNet-50

| Methods | Origin | Avg  | A → W | D → W | W → D | A → D | D → A | W → A |
|---------|--------|------|-------|-------|-------|-------|-------|-------|
| ERM     | 76.1   | 79.5 | 75.8  | 95.5  | 99.0  | 79.3  | 63.6  | 63.8  |
| DANN    | 82.2   | 86.1 | 91.4  | 97.9  | 100.0 | 83.6  | 73.3  | 70.4  |
| ADDA    | /      | 87.3 | 94.6  | 97.5  | 99.7  | 90.0  | 69.6  | 72.5  |
| BSP     | 87.7   | 87.8 | 92.7  | 97.9  | 100.0 | 88.2  | 74.1  | 73.8  |
| DAN     | 80.4   | 83.7 | 84.2  | 98.4  | 100.0 | 87.3  | 66.9  | 65.2  |
| JAN     | 84.3   | 87.0 | 93.7  | 98.4  | 100.0 | 89.4  | 69.2  | 71.0  |
| CDAN    | 87.7   | 87.7 | 93.8  | 98.5  | 100.0 | 89.9  | 73.4  | 70.4  |
| MCD     | /      | 85.4 | 90.4  | 98.5  | 100.0 | 87.3  | 68.3  | 67.6  |
| AFN     | 85.7   | 88.6 | 94.0  | 98.9  | 100.0 | 94.4  | 72.9  | 71.1  |
| MDD     | 88.9   | 89.6 | 95.6  | 98.6  | 100.0 | 94.4  | 76.6  | 72.2  |
| MCC     | 89.4   | 89.6 | 94.1  | 98.4  | 99.8  | 95.6  | 75.5  | 74.2  |
| FixMatch| /      | 86.4 | 86.4  | 98.2  | 100.0 | 95.4  | 70.0  | 68.1  |

### Office-Home accuracy on ResNet-50

| Methods     | Origin | Avg  | Ar → Cl | Ar → Pr | Ar → Rw | Cl → Ar | Cl → Pr | Cl → Rw | Pr → Ar | Pr → Cl | Pr → Rw | Rw → Ar | Rw → Cl | Rw → Pr |
|-------------|--------|------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|
| ERM         | 46.1   | 58.4 | 41.1    | 65.9    | 73.7    | 53.1    | 60.1    | 63.3    | 52.2    | 36.7    | 71.8    | 64.8    | 42.6    | 75.2    |
| DAN         | 56.3   | 61.4 | 45.6    | 67.7    | 73.9    | 57.7    | 63.8    | 66.0    | 54.9    | 40.0    | 74.5    | 66.2    | 49.1    | 77.9    |
| DANN        | 57.6   | 65.2 | 53.8    | 62.6    | 74.0    | 55.8    | 67.3    | 67.3    | 55.8    | 55.1    | 77.9    | 71.1    | 60.7    | 81.1    |
| ADDA        | /      | 65.6 | 52.6    | 62.9    | 74.0    | 59.7    | 68.0    | 68.8    | 61.4    | 52.5    | 77.6    | 71.1    | 58.6    | 80.2    |
| JAN         | 58.3   | 65.9 | 50.8    | 71.9    | 76.5    | 60.6    | 68.3    | 68.7    | 60.5    | 49.6    | 76.9    | 71.0    | 55.9    | 80.5    |
| CDAN        | 65.8   | 68.8 | 55.2    | 72.4    | 77.6    | 62.0    | 69.7    | 70.9    | 62.4    | 54.3    | 80.5    | 75.5    | 61.0    | 83.8    |
| MCD         | /      | 67.8 | 51.7    | 72.2    | 78.2    | 63.7    | 69.5    | 70.8    | 61.5    | 52.8    | 78.0    | 74.5    | 58.4    | 81.8    |
| BSP         | 64.9   | 67.6 | 54.7    | 67.7    | 76.2    | 61.0    | 69.4    | 70.9    | 60.9    | 55.2    | 80.2    | 73.4    | 60.3    | 81.2    |
| AFN         | 67.3   | 68.2 | 53.2    | 72.7    | 76.8    | 65.0    | 71.3    | 72.3    | 65.0    | 51.4    | 77.9    | 72.3    | 57.8    | 82.4    |
| MDD         | 68.1   | 69.7 | 56.2    | 75.4    | 79.6    | 63.5    | 72.1    | 73.8    | 62.5    | 54.8    | 79.9    | 73.5    | 60.9    | 84.5    |
| MCC         | /      | 72.4 | 58.4    | 79.6    | 83.0    | 67.5    | 77.0    | 78.5    | 66.6    | 54.8    | 81.8    | 74.4    | 61.4    | 85.6    |
| FixMatch    | /      | 70.8 | 56.4    | 76.4    | 79.9    | 65.3    | 73.8    | 71.2    | 67.2    | 56.4    | 80.6    | 74.9    | 63.5    | 84.3    |

### Office-Home accuracy on vit_base_patch16_224 (batch size 24)

| Methods     | Ar → Cl | Ar → Pr | Ar → Rw | Cl → Ar | Cl → Pr | Cl → Rw | Pr → Ar | Pr → Cl | Pr → Rw | Rw → Ar | Rw → Cl | Rw → Pr | Avg  |
|-------------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|---------|------|
| Source Only | 52.4    | 82.1    | 86.9    | 76.8    | 84.1    | 86      | 75.1    | 51.2    | 88.1    | 78.3    | 51.5    | 87.8    | 75.0 |
| DANN        | 60.1    | 80.8    | 87.9    | 78.1    | 82.6    | 85.9    | 78.8    | 63.2    | 90.2    | 82.3    | 64      | 89.3    | 78.6 |
| DAN         | 56.3    | 83.6    | 87.5    | 77.7    | 84.7    | 86.7    | 75.9    | 54.5    | 88.5    | 80.2    | 56.2    | 88.2    | 76.7 |
| JAN         | 60.1    | 86.9    | 88.6    | 79.2    | 85.4    | 86.7    | 80.4    | 59.4    | 89.6    | 82      | 60.7    | 89.9    | 79.1 |
| CDAN        | 61.6    | 87.8    | 89.6    | 81.4    | 88.1    | 88.5    | 82.4    | 62.5    | 90.8    | 84.2    | 63.5    | 90.8    | 80.9 |
| MCD         | 52.3    | 75.3    | 85.3    | 75.4    | 75.4    | 78.3    | 68.8    | 49.7    | 86      | 80.6    | 60      | 89      | 73.0 |
| AFN         | 58.3    | 87.2    | 88.2    | 81.7    | 87      | 88.2    | 81      | 58.4    | 89.2    | 81.5    | 59.2    | 89.2    | 79.1 |
| MDD         | 64      | 89.3    | 90.4    | 82.2    | 87.7    | 89.2    | 82.8    | 64.9    | 91.7    | 83.7    | 65.4    | 92      | 81.9 |

### VisDA-2017 accuracy on ResNet-101

| Methods     | Origin | Mean | plane | bcycl | bus  | car  | horse | knife | mcycl | person | plant | sktbrd | train | truck | Avg  |
|-------------|--------|------|-------|-------|------|------|-------|-------|-------|--------|-------|--------|-------|-------|------|
| ERM         | 52.4   | 51.7 | 63.6  | 35.3  | 50.6 | 78.2 | 74.6  | 18.7  | 82.1  | 16.0   | 84.2  | 35.5   | 77.4  | 4.7   | 56.9 |
| DANN        | 57.4   | 79.5 | 93.5  | 74.3  | 83.4 | 50.7 | 87.2  | 90.2  | 89.9  | 76.1   | 88.1  | 91.4   | 89.7  | 39.8  | 74.9 |
| ADDA        | /      | 77.5 | 95.6  | 70.8  | 84.4 | 54.0 | 87.8  | 75.8  | 88.4  | 69.3   | 84.1  | 86.2   | 85.0  | 48.0  | 74.3 |
| BSP         | 75.9   | 80.5 | 95.7  | 75.6  | 82.8 | 54.5 | 89.2  | 96.5  | 91.3  | 72.2   | 88.9  | 88.7   | 88.0  | 43.4  | 76.2 |
| DAN         | 61.1   | 66.4 | 89.2  | 37.2  | 77.7 | 61.8 | 81.7  | 64.3  | 90.6  | 61.4   | 79.9  | 37.7   | 88.1  | 27.4  | 67.2 |
| JAN         | /      | 73.4 | 96.3  | 66.0  | 82.0 | 44.1 | 86.4  | 70.3  | 87.9  | 74.6   | 83.0  | 64.6   | 84.5  | 41.3  | 70.3 |
| CDAN        | /      | 80.1 | 94.0  | 69.2  | 78.9 | 57.0 | 89.8  | 94.9  | 91.9  | 80.3   | 86.8  | 84.9   | 85.0  | 48.5  | 76.5 |
| MCD         | 71.9   | 77.7 | 87.8  | 75.7  | 84.2 | 78.1 | 91.6  | 95.3  | 88.1  | 78.3   | 83.4  | 64.5   | 84.8  | 20.9  | 76.7 |
| AFN         | 76.1   | 75.0 | 95.6  | 56.2  | 81.3 | 69.8 | 93.0  | 81.0  | 93.4  | 74.1   | 91.7  | 55.0   | 90.6  | 18.1  | 74.4 |
| MDD         | /      | 82.0 | 88.3  | 62.8  | 85.2 | 69.9 | 91.9  | 95.1  | 94.4  | 81.2   | 93.8  | 89.8   | 84.1  | 47.9  | 79.8 |
| MCC         | 78.8   | 83.6 | 95.3  | 85.8  | 77.1 | 68.0 | 93.9  | 92.9  | 84.5  | 79.5   | 93.6  | 93.7   | 85.3  | 53.8  | 80.4 |
| FixMatch    | /      | 79.5 | 96.5  | 76.6  | 72.6 | 84.6 | 96.3  | 92.6  | 90.5  | 81.8   | 91.9  | 74.6   | 87.3  | 8.6   | 78.4 |

### DomainNet accuracy on ResNet-101

| Methods   | c->p | c->r | c->s | p->c | p->r | p->s | r->c | r->p | r->s | s->c | s->p | s->r | Avg  |
|-------------|------|------|------|------|------|------|------|------|------|------|------|------|------|
| ERM         | 32.7 | 50.6 | 39.4 | 41.1 | 56.8 | 35.0 | 48.6 | 48.8 | 36.1 | 49.0 | 34.8 | 46.1 | 43.3 |
| DAN         | 38.8 | 55.2 | 43.9 | 45.9 | 59.0 | 40.8 | 50.8 | 49.8 | 38.9 | 56.1 | 45.9 | 55.5 | 48.4 |
| DANN        | 37.9 | 54.3 | 44.4 | 41.7 | 55.6 | 36.8 | 50.7 | 50.8 | 40.1 | 55.0 | 45.0 | 54.5 | 47.2 |
| JAN         | 40.5 | 56.7 | 45.1 | 47.2 | 59.9 | 43.0 | 54.2 | 52.6 | 41.9 | 56.6 | 46.2 | 55.5 | 50.0 |
| CDAN        | 40.4 | 56.8 | 46.1 | 45.1 | 58.4 | 40.5 | 55.6 | 53.6 | 43.0 | 57.2 | 46.4 | 55.7 | 49.9 |
| MCD         | 37.5 | 52.9 | 44.0 | 44.6 | 54.5 | 41.6 | 52.0 | 51.5 | 39.7 | 55.5 | 44.6 | 52.0 | 47.5 |
| MDD         | 42.9 | 59.5 | 47.5 | 48.6 | 59.4 | 42.6 | 58.3 | 53.7 | 46.2 | 58.7 | 46.5 | 57.7 | 51.8 |
| MCC         | 37.7 | 55.7 | 42.6 | 45.4 | 59.8 | 39.9 | 54.4 | 53.1 | 37.0 | 58.1 | 46.3 | 56.2 | 48.9 |

### DomainNet accuracy on ResNet-101 (Multi-Source)

| Methods     | Origin | Avg  | :c   | :i   | :p   | :q   | :r   | :s   |
|-------------|--------|------|------|------|------|------|------|------|
| ERM         | 32.9   | 47.0 | 64.9 | 25.2 | 54.4 | 16.9 | 68.2 | 52.3 |
| MDD         | /      | 48.8 | 68.7 | 29.7 | 58.2 | 9.7  | 69.4 | 56.9 |
| Oracle      | 63.0   | 69.1 | 78.2 | 40.7 | 71.6 | 69.7 | 83.8 | 70.6 |

### Performance on ImageNet-scale dataset

|      | ResNet50, ImageNet->ImageNetR | ig_resnext101_32x8d, ImageNet->ImageNet-Sketch |
|------|-------------------------------|------------------------------------------|
| ERM  | 35.6                          | 54.9                                     |
| DAN  | 39.8                          | 55.7                                     |
| DANN | 52.7                          | 56.5                                     |
| JAN  | 41.7                          | 55.7                                     |
| CDAN | 53.9                          | 58.2                                     |
| MCD  | 46.7                          | 55.0                                     |
| AFN  | 43.0                          | 55.1                                     |
| MDD  | 56.2                          | 62.4                                     |

## Visualization

After training `DANN`, run the following command

```
CUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/dann/Office31_A2W --phase analysis
```

It may take a while, then in directory ``logs/dann/Office31_A2W/visualize``, you can find
``TSNE.png``.

Following are the t-SNE visualizations of representations from ResNet50 trained on the source domain and those from DANN.

<img src="./fig/resnet_A2W.png" width="300"/>
<img src="./fig/dann_A2W.png" width="300"/>

## TODO

1. Support self-training methods
2. Support translation methods
3. Add results on ViT
4. Add results on ImageNet

## Citation

If you use these methods in your research, please consider citing.

```
@inproceedings{DANN,
    author = {Ganin, Yaroslav and Lempitsky, Victor},
    booktitle = {ICML},
    title = {Unsupervised domain adaptation by backpropagation},
    year = {2015}
}

@inproceedings{DAN,
    author    = {Mingsheng Long and
    Yue Cao and
    Jianmin Wang and
    Michael I. Jordan},
    title     = {Learning Transferable Features with Deep Adaptation Networks},
    booktitle = {ICML},
    year      = {2015},
}

@inproceedings{JAN,
    title={Deep transfer learning with joint adaptation networks},
    author={Long, Mingsheng and Zhu, Han and Wang, Jianmin and Jordan, Michael I},
    booktitle={ICML},
    year={2017},
}

@inproceedings{ADDA,
    title={Adversarial discriminative domain adaptation},
    author={Tzeng, Eric and Hoffman, Judy and Saenko, Kate and Darrell, Trevor},
    booktitle={CVPR},
    year={2017}
}

@inproceedings{CDAN,
    author    = {Mingsheng Long and
                Zhangjie Cao and
                Jianmin Wang and
                Michael I. Jordan},
    title     = {Conditional Adversarial Domain Adaptation},
    booktitle = {NeurIPS},
    year      = {2018}
}

@inproceedings{MCD,
    title={Maximum classifier discrepancy for unsupervised domain adaptation},
    author={Saito, Kuniaki and Watanabe, Kohei and Ushiku, Yoshitaka and Harada, Tatsuya},
    booktitle={CVPR},
    year={2018}
}

@InProceedings{AFN,
    author = {Xu, Ruijia and Li, Guanbin and Yang, Jihan and Lin, Liang},
    title = {Larger Norm More Transferable: An Adaptive Feature Norm Approach for Unsupervised Domain Adaptation},
    booktitle = {ICCV},
    year = {2019}
}

@inproceedings{MDD,
    title={Bridging theory and algorithm for domain adaptation},
    author={Zhang, Yuchen and Liu, Tianle and Long, Mingsheng and Jordan, Michael},
    booktitle={ICML},
    year={2019},
}

@inproceedings{BSP,
    title={Transferability vs. discriminability: Batch spectral penalization for adversarial domain adaptation},
    author={Chen, Xinyang and Wang, Sinan and Long, Mingsheng and Wang, Jianmin},
    booktitle={ICML},
    year={2019},
}

@inproceedings{MCC,
    author    = {Ying Jin and
                Ximei Wang and
                Mingsheng Long and
                Jianmin Wang},
    title     = {Less Confusion More Transferable: Minimum Class Confusion for Versatile
               Domain Adaptation},
    year={2020},
    booktitle={ECCV},
}

@inproceedings{FixMatch,
    title={Fixmatch: Simplifying semi-supervised learning with consistency and confidence},
    author={Sohn, Kihyuk and Berthelot, David and Carlini, Nicholas and Zhang, Zizhao and Zhang, Han and Raffel, Colin A and Cubuk, Ekin Dogus and Kurakin, Alexey and Li, Chun-Liang},
    booktitle={NeurIPS},
    year={2020}
}

```


================================================
FILE: examples/domain_adaptation/image_classification/adda.py
================================================
"""
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
Note: Our implementation is different from ADDA paper in several respects. We do not use separate networks for
source and target domain, nor fix classifier head. Besides, we do not adopt asymmetric objective loss function
of the feature extractor.
"""
import random
import time
import warnings
import copy
import argparse
import shutil
import os.path as osp

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

import utils
from tllib.alignment.adda import ImageClassifier
from tllib.alignment.dann import DomainAdversarialLoss
from tllib.modules.domain_discriminator import DomainDiscriminator
from tllib.modules.grl import WarmStartGradientReverseLayer
from tllib.utils.data import ForeverDataIterator
from tllib.utils.meter import AverageMeter, ProgressMeter
from tllib.utils.logger import CompleteLogger
from tllib.utils.analysis import collect_feature, tsne, a_distance

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def set_requires_grad(net, requires_grad=False):
    """Enable or disable gradient computation for every parameter of ``net``.

    Freezing a network (``requires_grad=False``, the default) avoids
    unnecessary gradient computation and keeps its weights fixed while other
    modules are being optimized.
    """
    for p in net.parameters():
        p.requires_grad = requires_grad


def main(args: argparse.Namespace):
    """Entry point for ADDA-style adaptation.

    Workflow: (1) pretrain a classifier on the source domain unless a
    pretrained checkpoint is supplied, (2) freeze it as the source model,
    (3) adversarially train a copy (the target model) so that its features on
    target data are indistinguishable from the source model's features on
    source data. ``args.phase`` selects between full training, test-only
    evaluation, and feature analysis (t-SNE / A-distance).
    """
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    # Optionally seed RNGs for reproducibility; deterministic cuDNN trades speed
    # for repeatability, hence the warning.
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                                random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    # Infinite iterators: training below is driven by a fixed number of
    # iterations per epoch rather than by dataset length.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    # Passing pool_layer=None presumably lets ImageClassifier install its own
    # default pooling; nn.Identity() disables pooling (--no-pool).
    pool_layer = nn.Identity() if args.no_pool else None
    source_classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                        pool_layer=pool_layer, finetune=not args.scratch).to(device)

    if args.phase == 'train' and args.pretrain is None:
        # first pretrain the classifier with source data
        print("Pretraining the model on source domain.")
        args.pretrain = logger.get_checkpoint_path('pretrain')
        # NOTE(review): pretrain_model wraps the SAME `backbone` object as
        # source_classifier, so pretraining also updates that shared backbone;
        # the explicit checkpoint load below makes the final state unambiguous.
        pretrain_model = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                         pool_layer=pool_layer, finetune=not args.scratch).to(device)
        pretrain_optimizer = SGD(pretrain_model.get_parameters(), args.pretrain_lr, momentum=args.momentum,
                                 weight_decay=args.weight_decay, nesterov=True)
        # Polynomial decay schedule; the lambda multiplies by args.pretrain_lr,
        # presumably because get_parameters() exposes relative base lrs — TODO
        # confirm against tllib's get_parameters convention.
        pretrain_lr_scheduler = LambdaLR(pretrain_optimizer,
                                         lambda x: args.pretrain_lr * (1. + args.lr_gamma * float(x)) ** (
                                             -args.lr_decay))
        # start pretraining
        for epoch in range(args.pretrain_epochs):
            print("lr:", pretrain_lr_scheduler.get_lr())
            # pretrain for one epoch
            utils.empirical_risk_minimization(train_source_iter, pretrain_model, pretrain_optimizer,
                                              pretrain_lr_scheduler, epoch, args,
                                              device)
            # validate to show pretrain process
            utils.validate(val_loader, pretrain_model, args, device)

        torch.save(pretrain_model.state_dict(), args.pretrain)
        print("Pretraining process is done.")

    # Load pretrained weights; the target classifier starts as an exact copy of
    # the source classifier and will be adapted adversarially.
    checkpoint = torch.load(args.pretrain, map_location='cpu')
    source_classifier.load_state_dict(checkpoint)
    target_classifier = copy.deepcopy(source_classifier)

    # freeze source classifier (weights and batch-norm statistics)
    set_requires_grad(source_classifier, False)
    source_classifier.freeze_bn()

    domain_discri = DomainDiscriminator(in_feature=source_classifier.features_dim, hidden_size=1024).to(device)

    # define loss function
    # Warm-start GRL: the reversal coefficient is scheduled from lo=0 to hi=2
    # over max_iters=1000 steps; auto_step presumably advances the internal
    # counter on every forward pass.
    grl = WarmStartGradientReverseLayer(alpha=1., lo=0., hi=2., max_iters=1000, auto_step=True)
    domain_adv = DomainAdversarialLoss(domain_discri, grl=grl).to(device)

    # define optimizer and lr scheduler
    # note that we only optimize target feature extractor
    optimizer = SGD(target_classifier.get_parameters(optimize_head=False) + domain_discri.get_parameters(), args.lr,
                    momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        target_classifier.load_state_dict(checkpoint)

    # analysis the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(target_classifier.backbone, target_classifier.pool_layer,
                                          target_classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, target_classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print(lr_scheduler.get_lr())
        # train for one epoch
        train(train_source_iter, train_target_iter, source_classifier, target_classifier, domain_adv,
              optimizer, lr_scheduler, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, target_classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(target_classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    target_classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, target_classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()


def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
          source_model: ImageClassifier, target_model: ImageClassifier, domain_adv: DomainAdversarialLoss,
          optimizer: SGD, lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Run one epoch of adversarial feature alignment.

    Only the domain-adversarial transfer loss is optimized: features of source
    images produced by the frozen ``source_model`` are aligned with features of
    target images produced by the trainable ``target_model`` via ``domain_adv``.
    Labels are never used in this loop.
    """
    batch_time = AverageMeter('Time', ':5.2f')
    data_time = AverageMeter('Data', ':5.2f')
    losses_transfer = AverageMeter('Transfer Loss', ':6.2f')
    domain_accs = AverageMeter('Domain Acc', ':3.1f')
    progress = ProgressMeter(
        args.iters_per_epoch,
        [batch_time, data_time, losses_transfer, domain_accs],
        prefix="Epoch: [{}]".format(epoch))

    # switch to train mode (the source model stays frozen and is not touched)
    target_model.train()
    domain_adv.train()

    last_tick = time.time()
    for step in range(args.iters_per_epoch):
        # fetch one unlabeled batch from each domain
        x_s = next(train_source_iter)[0].to(device)
        x_t = next(train_target_iter)[0].to(device)

        # measure data loading time
        data_time.update(time.time() - last_tick)

        # second return value of the classifier is the feature representation
        f_s = source_model(x_s)[1]
        f_t = target_model(x_t)[1]
        loss_transfer = domain_adv(f_s, f_t)

        # single SGD step on the adversarial objective
        optimizer.zero_grad()
        loss_transfer.backward()
        optimizer.step()
        lr_scheduler.step()

        n = x_s.size(0)
        losses_transfer.update(loss_transfer.item(), n)
        domain_accs.update(domain_adv.domain_discriminator_accuracy.item(), n)

        # measure elapsed time
        batch_time.update(time.time() - last_tick)
        last_tick = time.time()

        if step % args.print_freq == 0:
            progress.display(step)


if __name__ == '__main__':
    # Command-line interface; defaults follow the common tllib example setup.
    parser = argparse.ArgumentParser(description='ADDA for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('root', metavar='DIR',
                        help='root path of dataset')
    parser.add_argument('-d', '--data', metavar='DATA', default='Office31', choices=utils.get_dataset_names(),
                        help='dataset: ' + ' | '.join(utils.get_dataset_names()) +
                             ' (default: Office31)')
    # multiple domains may be given for multi-source / multi-target settings
    parser.add_argument('-s', '--source', help='source domain(s)', nargs='+')
    parser.add_argument('-t', '--target', help='target domain(s)', nargs='+')
    parser.add_argument('--train-resizing', type=str, default='default')
    parser.add_argument('--val-resizing', type=str, default='default')
    parser.add_argument('--resize-size', type=int, default=224,
                        help='the image size after resizing')
    parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                        help='Random resize scale (default: 0.08 1.0)')
    parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
                        help='Random resize aspect ratio (default: 0.75 1.33)')
    parser.add_argument('--no-hflip', action='store_true',
                        help='no random horizontal flipping during training')
    # ImageNet normalization statistics by default
    parser.add_argument('--norm-mean', type=float, nargs='+',
                        default=(0.485, 0.456, 0.406), help='normalization mean')
    parser.add_argument('--norm-std', type=float, nargs='+',
                        default=(0.229, 0.224, 0.225), help='normalization std')
    # model parameters
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                        choices=utils.get_model_names(),
                        help='backbone architecture: ' +
                             ' | '.join(utils.get_model_names()) +
                             ' (default: resnet18)')
    # if omitted in 'train' phase, main() pretrains on the source domain first
    parser.add_argument('--pretrain', type=str, default=None,
                        help='pretrain checkpoint for classification model')
    parser.add_argument('--bottleneck-dim', default=256, type=int,
                        help='Dimension of bottleneck')
    parser.add_argument('--no-pool', action='store_true',
                        help='no pool layer after the feature extractor.')
    parser.add_argument('--scratch', action='store_true', help='whether train from scratch.')
    # training parameters
    parser.add_argument('-b', '--batch-size', default=32, type=int,
                        metavar='N',
                        help='mini-batch size (default: 32)')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                        metavar='LR', help='initial learning rate of the classifier', dest='lr')
    parser.add_argument('--pretrain-lr', default=0.001, type=float, help='initial pretrain learning rate')
    # --lr-gamma / --lr-decay parameterize the polynomial decay schedule in main()
    parser.add_argument('--lr-gamma', default=0.0003, type=float, help='parameter for lr scheduler')
    parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-3, type=float,
                        metavar='W', help='weight decay (default: 1e-3)',
                        dest='weight_decay')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=20, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--pretrain-epochs', default=3, type=int, metavar='N',
                        help='number of total epochs (pretrain) to run')
    parser.add_argument('-i', '--iters-per-epoch', default=1000, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    parser.add_argument("--log", type=str, default='adda',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model."
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    main(args)


================================================
FILE: examples/domain_adaptation/image_classification/adda.sh
================================================
#!/usr/bin/env bash
# ResNet50, Office31, Single Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_A2W
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s D -t W -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_D2W
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s W -t D -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_W2D
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s A -t D -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_A2D
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s D -t A -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_D2A
CUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 -s W -t A -a resnet50 --epochs 20 --seed 1 --log logs/adda/Office31_W2A

# ResNet50, Office-Home, Single Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_Rw2Pr

# ResNet101, VisDA-2017, Single Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/visda-2017 -d VisDA2017 -s Synthetic -t Real -a resnet101 \
    --epochs 30 --seed 0 --per-class-eval --train-resizing cen.crop --log logs/adda/VisDA2017

# ResNet101, DomainNet, Single Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_c2p
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_c2r
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_c2s
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s p -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_p2c
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s p -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_p2r
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s p -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_p2s
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s r -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_r2c
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s r -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_r2p
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s r -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_r2s
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s s -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_s2c
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s s -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_s2p
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s s -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/DomainNet_s2r

# ResNet50, ImageNet200 -> ImageNetR
CUDA_VISIBLE_DEVICES=0 python adda.py data/ImageNetR -d ImageNetR -s IN -t INR -a resnet50 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/adda/ImageNet_IN2INR

# ig_resnext101_32x8d, ImageNet -> ImageNetSketch
# fix: log under logs/adda_* (was logs/dann_*, a copy-paste leftover from dann.sh)
CUDA_VISIBLE_DEVICES=0 python adda.py data/imagenet-sketch -d ImageNetSketch -s IN -t sketch -a ig_resnext101_32x8d --epochs 30 -i 2500 -p 500 --bottleneck-dim 1024 --log logs/adda_ig_resnext101_32x8d/ImageNet_IN2sketch

# Vision Transformer, Office-Home, Single Source
# fix: log under logs/adda_vit (was logs/dann_vit, a copy-paste leftover from dann.sh)
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Pr -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Rw -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/adda_vit/OfficeHome_Rw2Pr

# ResNet50, Office-Home, Multi Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Cl Pr Rw -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_:2Ar
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar Pr Rw -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_:2Cl
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar Cl Rw -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_:2Pr
CUDA_VISIBLE_DEVICES=0 python adda.py data/office-home -d OfficeHome -s Ar Cl Pr -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/adda/OfficeHome_:2Rw

# ResNet101, DomainNet, Multi Source
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s i p q r s -t c -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2c
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c p q r s -t i -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2i
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c i q r s -t p -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2p
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c i p r s -t q -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2q
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c i p q s -t r -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2r
CUDA_VISIBLE_DEVICES=0 python adda.py data/domainnet -d DomainNet -s c i p q r -t s -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/adda/DomainNet_:2s

# Digits
CUDA_VISIBLE_DEVICES=0 python adda.py data/digits -d Digits -s MNIST -t USPS --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool --lr 0.01 -b 128 -i 2500 --scratch --seed 0 --log logs/adda/MNIST2USPS
CUDA_VISIBLE_DEVICES=0 python adda.py data/digits -d Digits -s USPS -t MNIST --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool --lr 0.1 -b 128 -i 2500 --scratch --seed 0 --log logs/adda/USPS2MNIST
# fix: dropped '--lr-d 0.03' — adda.py defines no such option (it belongs to dann.py),
# so argparse would abort this command with "unrecognized arguments"
CUDA_VISIBLE_DEVICES=0 python adda.py data/digits -d Digits -s SVHNRGB -t MNISTRGB --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 32 --no-hflip --norm-mean 0.5 0.5 0.5 --norm-std 0.5 0.5 0.5 -a dtn --no-pool --lr 0.03 -b 128 -i 2500 --scratch --seed 0 --log logs/adda/SVHN2MNIST


================================================
FILE: examples/domain_adaptation/image_classification/afn.py
================================================
"""
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
"""
import random
import time
import warnings
import argparse
import shutil
import os.path as osp

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.utils.data import DataLoader
import torch.nn.functional as F

import utils
from tllib.normalization.afn import AdaptiveFeatureNorm, ImageClassifier
from tllib.modules.entropy import entropy
from tllib.utils.data import ForeverDataIterator
from tllib.utils.metric import accuracy
from tllib.utils.meter import AverageMeter, ProgressMeter
from tllib.utils.logger import CompleteLogger
from tllib.utils.analysis import collect_feature, tsne, a_distance


device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main(args: argparse.Namespace):
    """Entry point for the AFN example.

    Depending on ``args.phase`` this either trains the model ('train'),
    evaluates the best checkpoint on the test split ('test'), or extracts
    features for t-SNE / A-distance analysis ('analysis').
    """
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        # Seed Python and torch RNGs; deterministic cuDNN trades speed for reproducibility.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    # drop_last=True keeps source/target batch sizes equal across the whole epoch.
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    # Endless iterators so each epoch runs a fixed number of iterations
    # regardless of dataset length.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    # --no-pool replaces the default pooling with identity (e.g. for ViT backbones).
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, args.num_blocks,
                                 bottleneck_dim=args.bottleneck_dim, dropout_p=args.dropout_p,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)
    adaptive_feature_norm = AdaptiveFeatureNorm(args.delta).to(device)

    # define optimizer
    # the learning rate is fixed according to origin paper
    # NOTE(review): --momentum is parsed by the CLI but deliberately not passed
    # to SGD here (plain SGD with weight decay only) — confirm against the paper.
    optimizer = SGD(classifier.get_parameters(), args.lr, weight_decay=args.weight_decay)

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analysis the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, adaptive_feature_norm, optimizer, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()


def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator, model: ImageClassifier,
          adaptive_feature_norm: AdaptiveFeatureNorm, optimizer: SGD, epoch: int, args: argparse.Namespace):
    """Run one epoch of Adaptive Feature Norm (AFN) training.

    Each iteration draws a labelled source batch and an unlabelled target batch,
    minimizes source cross-entropy plus the adaptive feature-norm loss on both
    domains, and optionally adds entropy minimization on target predictions.

    Args:
        train_source_iter: endless iterator yielding (image, label) source batches.
        train_target_iter: endless iterator yielding unlabelled target batches.
        model: classifier returning (logits, bottleneck features).
        adaptive_feature_norm: the AFN loss module applied to feature norms.
        optimizer: SGD over the classifier parameters.
        epoch: current epoch index (used for logging only).
        args: CLI options; reads iters_per_epoch, trade_off_norm,
            trade_off_entropy and print_freq.
    """
    batch_time = AverageMeter('Time', ':3.1f')
    data_time = AverageMeter('Data', ':3.1f')
    cls_losses = AverageMeter('Cls Loss', ':3.2f')
    norm_losses = AverageMeter('Norm Loss', ':3.2f')
    src_feature_norm = AverageMeter('Source Feature Norm', ':3.2f')
    tgt_feature_norm = AverageMeter('Target Feature Norm', ':3.2f')
    cls_accs = AverageMeter('Cls Acc', ':3.1f')

    progress = ProgressMeter(
        args.iters_per_epoch,
        [batch_time, data_time, cls_losses, norm_losses, src_feature_norm, tgt_feature_norm, cls_accs],
        prefix="Epoch: [{}]".format(epoch))

    # switch to train mode
    model.train()

    end = time.time()
    for i in range(args.iters_per_epoch):
        x_s, labels_s = next(train_source_iter)[:2]
        x_t, = next(train_target_iter)[:1]

        x_s = x_s.to(device)
        x_t = x_t.to(device)
        labels_s = labels_s.to(device)

        # measure data loading time
        data_time.update(time.time() - end)

        # compute output
        y_s, f_s = model(x_s)
        y_t, f_t = model(x_t)

        # classification loss on the labelled source batch
        cls_loss = F.cross_entropy(y_s, labels_s)
        # norm loss: push feature norms of both domains toward the adaptive target
        norm_loss = adaptive_feature_norm(f_s) + adaptive_feature_norm(f_t)
        loss = cls_loss + norm_loss * args.trade_off_norm

        # using entropy minimization
        # Fix: compare against None explicitly — a truthiness test would silently
        # skip an explicit `--trade-off-entropy 0` while other numeric options
        # honor user-supplied values.
        if args.trade_off_entropy is not None:
            y_t = F.softmax(y_t, dim=1)
            entropy_loss = entropy(y_t, reduction='mean')
            loss += entropy_loss * args.trade_off_entropy

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # update statistics
        cls_acc = accuracy(y_s, labels_s)[0]

        cls_losses.update(cls_loss.item(), x_s.size(0))
        norm_losses.update(norm_loss.item(), x_s.size(0))
        src_feature_norm.update(f_s.norm(p=2, dim=1).mean().item(), x_s.size(0))
        # Fix: weight the target-norm meter by the *target* batch size (was
        # x_s.size(0)). Identical here because both loaders share batch_size
        # with drop_last=True, but correct in general.
        tgt_feature_norm.update(f_t.norm(p=2, dim=1).mean().item(), x_t.size(0))
        cls_accs.update(cls_acc.item(), x_s.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i)


if __name__ == '__main__':
    # Command-line interface: parse options and launch training / test / analysis.
    parser = argparse.ArgumentParser(description='AFN for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('root', metavar='DIR',
                        help='root path of dataset')
    parser.add_argument('-d', '--data', metavar='DATA', default='Office31', choices=utils.get_dataset_names(),
                        help='dataset: ' + ' | '.join(utils.get_dataset_names()) +
                             ' (default: Office31)')
    parser.add_argument('-s', '--source', help='source domain(s)', nargs='+')
    parser.add_argument('-t', '--target', help='target domain(s)', nargs='+')
    parser.add_argument('--train-resizing', type=str, default='ran.crop')
    parser.add_argument('--val-resizing', type=str, default='default')
    parser.add_argument('--resize-size', type=int, default=224,
                        help='the image size after resizing')
    parser.add_argument('--no-hflip', action='store_true',
                        help='no random horizontal flipping during training')
    parser.add_argument('--norm-mean', type=float, nargs='+',
                        default=(0.485, 0.456, 0.406), help='normalization mean')
    parser.add_argument('--norm-std', type=float, nargs='+',
                        default=(0.229, 0.224, 0.225), help='normalization std')
    # model parameters
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                        choices=utils.get_model_names(),
                        help='backbone architecture: ' +
                             ' | '.join(utils.get_model_names()) +
                             ' (default: resnet18)')
    parser.add_argument('--no-pool', action='store_true',
                        help='no pool layer after the feature extractor.')
    parser.add_argument('--scratch', action='store_true', help='whether train from scratch.')
    parser.add_argument('-n', '--num-blocks', default=1, type=int, help='Number of basic blocks for classifier')
    parser.add_argument('--bottleneck-dim', default=1000, type=int, help='Dimension of bottleneck')
    parser.add_argument('--dropout-p', default=0.5, type=float,
                        help='Dropout probability')
    # training parameters
    parser.add_argument('-b', '--batch-size', default=32, type=int,
                        metavar='N',
                        help='mini-batch size (default: 32)')
    parser.add_argument('--lr', '--learning-rate', default=0.001, type=float,
                        metavar='LR', help='initial learning rate', dest='lr')
    # NOTE(review): --momentum is accepted but main() does not pass it to SGD.
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=5e-4, type=float,
                        metavar='W', help='weight decay (default: 5e-4)',
                        dest='weight_decay')
    parser.add_argument('--trade-off-norm', default=0.05, type=float,
                        help='the trade-off hyper-parameter for norm loss')
    # Entropy minimization is disabled unless this option is given on the command line.
    parser.add_argument('--trade-off-entropy', default=None, type=float,
                        help='the trade-off hyper-parameter for entropy loss')
    parser.add_argument('-r', '--delta', default=1, type=float, help='Increment for L2 norm')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=20, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-i', '--iters-per-epoch', default=500, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    parser.add_argument("--log", type=str, default='afn',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model."
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    main(args)


================================================
FILE: examples/domain_adaptation/image_classification/afn.sh
================================================
#!/usr/bin/env bash
# Reproduction commands for AFN (afn.py) across the supported benchmarks.
# Each line is an independent single-GPU run; logs/checkpoints go under --log.

# ResNet50, Office31, Single Source
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s A -t W -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_A2W
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s D -t W -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_D2W
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s W -t D -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_W2D
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s A -t D -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_A2D
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s D -t A -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_D2A
CUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s W -t A -a resnet50 --trade-off-entropy 0.1 --epochs 20 --seed 1 --log logs/afn/Office31_W2A

# ResNet50, Office-Home, Single Source
# (no --trade-off-entropy here: entropy minimization is disabled for Office-Home)
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Cl -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Pr -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Rw -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Ar -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Pr -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Rw -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Ar -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Cl -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Rw -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Ar -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Cl -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Pr -a resnet50 --epochs 20 --seed 0 --log logs/afn/OfficeHome_Rw2Pr

# ResNet101, VisDA-2017, Single Source
CUDA_VISIBLE_DEVICES=0 python afn.py data/visda-2017 -d VisDA2017 -s Synthetic -t Real -a resnet101 -r 0.3 -b 36 \
    --epochs 10 -i 1000 --seed 0 --per-class-eval --train-resizing cen.crop --log logs/afn/VisDA2017

# ResNet101, DomainNet, Single Source
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_c2p
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_c2r
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_c2s
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s p -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_p2c
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s p -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_p2r
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s p -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_p2s
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s r -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_r2c
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s r -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_r2p
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s r -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_r2s
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s s -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_s2c
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s s -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_s2p
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s s -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --trade-off-norm 0.01 --lr 0.002 --log logs/afn/DomainNet_s2r

# ResNet50, ImageNet200 -> ImageNetR
CUDA_VISIBLE_DEVICES=0 python afn.py data/ImageNetR -d ImageNetR -s IN -t INR -a resnet50 --epochs 20 -i 2500 --seed 0 --log logs/afn/ImageNet_IN2INR

# ig_resnext101_32x8d, ImageNet -> ImageNetSketch
CUDA_VISIBLE_DEVICES=0 python afn.py data/imagenet-sketch -d ImageNetSketch -s IN -t sketch -a ig_resnext101_32x8d --epochs 20 -i 2500 --seed 0 --log logs/afn_ig_resnext101_32x8d/ImageNet_IN2sketch

# Vision Transformer, Office-Home, Single Source
# (--no-pool: ViT backbones need no pooling layer; smaller batch for GPU memory)
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Cl -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Pr -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar -t Rw -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Ar -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Pr -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl -t Rw -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Ar -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Cl -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Pr -t Rw -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Ar -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Cl -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Rw -t Pr -a vit_base_patch16_224 --no-pool --epochs 30 --seed 0 -b 24 --log logs/afn_vit/OfficeHome_Rw2Pr

# ResNet50, Office-Home, Multi Source
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Cl Pr Rw -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/afn/OfficeHome_:2Ar
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar Pr Rw -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/afn/OfficeHome_:2Cl
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar Cl Rw -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/afn/OfficeHome_:2Pr
CUDA_VISIBLE_DEVICES=0 python afn.py data/office-home -d OfficeHome -s Ar Cl Pr -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/afn/OfficeHome_:2Rw

# ResNet101, DomainNet, Multi Source
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s i p q r s -t c -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2c
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c p q r s -t i -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2i
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c i q r s -t p -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2p
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c i p r s -t q -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2q
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c i p q s -t r -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2r
CUDA_VISIBLE_DEVICES=0 python afn.py data/domainnet -d DomainNet -s c i p q r -t s -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/afn/DomainNet_:2s

# Digits
# Small backbones trained from scratch at the digit datasets' native resolutions.
CUDA_VISIBLE_DEVICES=0 python afn.py data/digits -d Digits -s MNIST -t USPS --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool -r 0.3 --lr 0.01 --trade-off-entropy 0.03 -b 128 -i 2500 --scratch --seed 0 --log logs/afn/MNIST2USPS
CUDA_VISIBLE_DEVICES=0 python afn.py data/digits -d Digits -s USPS -t MNIST --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool -r 0.1 --lr 0.03 --trade-off-entropy 0.03 -b 128 -i 2500 --scratch --seed 0 --log logs/afn/USPS2MNIST
CUDA_VISIBLE_DEVICES=0 python afn.py data/digits -d Digits -s SVHNRGB -t MNISTRGB --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 32 --no-hflip --norm-mean 0.5 0.5 0.5 --norm-std 0.5 0.5 0.5 -a dtn --no-pool -r 0.1 --lr 0.1 --trade-off-entropy 0.03  -b 128 -i 2500 --scratch --seed 0 --log logs/afn/SVHN2MNIST



================================================
FILE: examples/domain_adaptation/image_classification/bsp.py
================================================
"""
@author: Baixu Chen
@contact: cbx_99_hasta@outlook.com
"""
import random
import time
import warnings
import argparse
import shutil
import os.path as osp

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torch.nn.functional as F

import utils
from tllib.alignment.dann import DomainAdversarialLoss
from tllib.alignment.bsp import BatchSpectralPenalizationLoss, ImageClassifier
from tllib.modules.domain_discriminator import DomainDiscriminator
from tllib.utils.data import ForeverDataIterator
from tllib.utils.metric import accuracy
from tllib.utils.meter import AverageMeter, ProgressMeter
from tllib.utils.logger import CompleteLogger
from tllib.utils.analysis import collect_feature, tsne, a_distance

# Run on the first visible GPU when available, otherwise fall back to CPU;
# shared by main() and train() below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main(args: argparse.Namespace):
    """Entry point for the BSP example.

    Depending on ``args.phase`` this either trains the model ('train'),
    evaluates the best checkpoint on the test split ('test'), or extracts
    features for t-SNE / A-distance analysis ('analysis'). Training first
    pretrains the classifier on the source domain (unless a pretrained
    checkpoint is supplied via ``args.pretrain``), then runs adversarial
    adaptation with the batch spectral penalty.
    """
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        # Seed Python and torch RNGs; deterministic cuDNN trades speed for reproducibility.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    train_transform = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                                random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    # drop_last=True keeps source/target batch sizes equal across the whole epoch.
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    # Endless iterators so each epoch runs a fixed number of iterations
    # regardless of dataset length.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    # --no-pool replaces the default pooling with identity (e.g. for ViT backbones).
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)
    domain_discri = DomainDiscriminator(in_feature=classifier.features_dim, hidden_size=1024).to(device)

    # define optimizer and lr scheduler
    optimizer = SGD(classifier.get_parameters() + domain_discri.get_parameters(),
                    args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    # inverse-decay schedule: factor = lr * (1 + gamma * step) ** (-decay)
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))

    # define loss function
    domain_adv = DomainAdversarialLoss(domain_discri).to(device)
    bsp_penalty = BatchSpectralPenalizationLoss().to(device)

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analysis the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    if args.pretrain is None:
        # first pretrain the classifier with source data only
        print("Pretraining the model on source domain.")
        args.pretrain = logger.get_checkpoint_path('pretrain')
        pretrain_model = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                         pool_layer=pool_layer, finetune=not args.scratch).to(device)
        pretrain_optimizer = SGD(pretrain_model.get_parameters(), args.pretrain_lr, momentum=args.momentum,
                                 weight_decay=args.weight_decay, nesterov=True)
        pretrain_lr_scheduler = LambdaLR(pretrain_optimizer,
                                         lambda x: args.pretrain_lr * (1. + args.lr_gamma * float(x)) ** (
                                             -args.lr_decay))
        # start pretraining
        for epoch in range(args.pretrain_epochs):
            # NOTE(review): get_lr() is deprecated in torch in favor of
            # get_last_lr() (used below at line "lr:" in the adaptation loop).
            print("lr:", pretrain_lr_scheduler.get_lr())
            # pretrain for one epoch
            utils.empirical_risk_minimization(train_source_iter, pretrain_model, pretrain_optimizer,
                                              pretrain_lr_scheduler, epoch, args,
                                              device)
            # validate to show pretrain process
            utils.validate(val_loader, pretrain_model, args, device)

        torch.save(pretrain_model.state_dict(), args.pretrain)
        print("Pretraining process is done.")

    # initialize the adaptation model from the (pre)trained source classifier
    checkpoint = torch.load(args.pretrain, map_location='cpu')
    classifier.load_state_dict(checkpoint)

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print("lr:", lr_scheduler.get_last_lr()[0])
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, domain_adv, bsp_penalty, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()


def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
          model: ImageClassifier, domain_adv: DomainAdversarialLoss, bsp_penalty: BatchSpectralPenalizationLoss,
          optimizer: SGD, lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Run one epoch of adversarial training with the batch spectral penalty (BSP).

    Each iteration draws one source batch (with labels) and one target batch
    (unlabeled), forwards both through ``model`` in a single pass, and minimizes
    source cross-entropy + ``args.trade_off`` * domain-adversarial loss
    + ``args.trade_off_bsp`` * BSP loss. The lr scheduler is stepped per iteration.
    """
    # progress meters (same display formats as the sibling example scripts)
    batch_time = AverageMeter('Time', ':5.2f')
    data_time = AverageMeter('Data', ':5.2f')
    losses = AverageMeter('Loss', ':6.2f')
    cls_accs = AverageMeter('Cls Acc', ':3.1f')
    domain_accs = AverageMeter('Domain Acc', ':3.1f')
    progress = ProgressMeter(args.iters_per_epoch,
                             [batch_time, data_time, losses, cls_accs, domain_accs],
                             prefix="Epoch: [{}]".format(epoch))

    # both the classifier and the domain discriminator are trained
    model.train()
    domain_adv.train()

    tick = time.time()
    for step in range(args.iters_per_epoch):
        x_s, labels_s = next(train_source_iter)[:2]
        x_t, = next(train_target_iter)[:1]

        x_s, x_t, labels_s = x_s.to(device), x_t.to(device), labels_s.to(device)

        # time spent waiting for data
        data_time.update(time.time() - tick)

        # one forward pass over the concatenated source+target batch, then
        # split predictions and features back into per-domain halves
        logits, features = model(torch.cat((x_s, x_t), dim=0))
        logits_s, logits_t = logits.chunk(2, dim=0)
        features_s, features_t = features.chunk(2, dim=0)

        cls_loss = F.cross_entropy(logits_s, labels_s)
        transfer_loss = domain_adv(features_s, features_t)
        bsp_loss = bsp_penalty(features_s, features_t)
        # discriminator accuracy is only valid after the domain_adv forward call
        domain_acc = domain_adv.domain_discriminator_accuracy
        loss = cls_loss + transfer_loss * args.trade_off + bsp_loss * args.trade_off_bsp

        cls_acc = accuracy(logits_s, labels_s)[0]

        batch_size = x_s.size(0)
        losses.update(loss.item(), batch_size)
        cls_accs.update(cls_acc.item(), batch_size)
        domain_accs.update(domain_acc.item(), batch_size)

        # SGD step followed by the per-iteration lr schedule
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        # total time for this iteration (data loading + compute)
        batch_time.update(time.time() - tick)
        tick = time.time()

        if step % args.print_freq == 0:
            progress.display(step)


if __name__ == '__main__':
    # Command line interface for BSP (Batch Spectral Penalization) training.
    parser = argparse.ArgumentParser(description='BSP for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('root', metavar='DIR',
                        help='root path of dataset')
    parser.add_argument('-d', '--data', metavar='DATA', default='Office31', choices=utils.get_dataset_names(),
                        help='dataset: ' + ' | '.join(utils.get_dataset_names()) +
                             ' (default: Office31)')
    parser.add_argument('-s', '--source', help='source domain(s)', nargs='+')
    parser.add_argument('-t', '--target', help='target domain(s)', nargs='+')
    parser.add_argument('--train-resizing', type=str, default='default')
    parser.add_argument('--val-resizing', type=str, default='default')
    parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                        help='Random resize scale (default: 0.08 1.0)')
    parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
                        help='Random resize aspect ratio (default: 0.75 1.33)')
    parser.add_argument('--resize-size', type=int, default=224,
                        help='the image size after resizing')
    parser.add_argument('--no-hflip', action='store_true',
                        help='no random horizontal flipping during training')
    parser.add_argument('--norm-mean', type=float, nargs='+',
                        default=(0.485, 0.456, 0.406), help='normalization mean')
    parser.add_argument('--norm-std', type=float, nargs='+',
                        default=(0.229, 0.224, 0.225), help='normalization std')
    # model parameters
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                        choices=utils.get_model_names(),
                        help='backbone architecture: ' +
                             ' | '.join(utils.get_model_names()) +
                             ' (default: resnet18)')
    # when --pretrain is omitted, main() first pretrains the classifier on
    # source data for --pretrain-epochs and caches the checkpoint
    parser.add_argument('--pretrain', type=str, default=None,
                        help='pretrain checkpoint for classification model')
    parser.add_argument('--bottleneck-dim', default=256, type=int,
                        help='Dimension of bottleneck')
    parser.add_argument('--no-pool', action='store_true',
                        help='no pool layer after the feature extractor.')
    parser.add_argument('--scratch', action='store_true', help='whether train from scratch.')
    # weights for the domain-adversarial term and the BSP term of the loss
    parser.add_argument('--trade-off', default=1., type=float,
                        help='the trade-off hyper-parameter for transfer loss')
    parser.add_argument('--trade-off-bsp', default=2e-4, type=float,
                        help='the trade-off hyper-parameter for bsp loss')
    # training parameters
    parser.add_argument('-b', '--batch-size', default=32, type=int,
                        metavar='N',
                        help='mini-batch size (default: 32)')
    parser.add_argument('--lr', '--learning-rate', default=0.003, type=float,
                        metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--pretrain-lr', default=0.001, type=float, help='initial pretrain learning rate')
    # lr schedule: lr * (1 + lr_gamma * iter) ** (-lr_decay), stepped per iteration
    parser.add_argument('--lr-gamma', default=0.001, type=float, help='parameter for lr scheduler')
    parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                        help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-3, type=float,
                        metavar='W', help='weight decay (default: 1e-3)',
                        dest='weight_decay')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=20, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('--pretrain-epochs', default=3, type=int, metavar='N',
                        help='number of total epochs(pretrain) to run (default: 3)')
    parser.add_argument('-i', '--iters-per-epoch', default=1000, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    parser.add_argument("--log", type=str, default='bsp',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model."
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    main(args)


================================================
FILE: examples/domain_adaptation/image_classification/bsp.sh
================================================
#!/usr/bin/env bash
# ResNet50, Office31, Single Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_A2W
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s D -t W -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_D2W
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s W -t D -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_W2D
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s A -t D -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_A2D
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s D -t A -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_D2A
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -s W -t A -a resnet50 --epochs 20 --seed 1 --log logs/bsp/Office31_W2A

# ResNet50, Office-Home, Single Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_Rw2Pr

# ResNet101, VisDA-2017, Single Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/visda-2017 -d VisDA2017 -s Synthetic -t Real -a resnet101 \
    --epochs 30 --seed 0 --per-class-eval --train-resizing cen.crop --log logs/bsp/VisDA2017

# ResNet101, DomainNet, Single Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_c2p
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_c2r
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_c2s
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s p -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_p2c
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s p -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_p2r
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s p -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_p2s
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s r -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_r2c
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s r -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_r2p
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s r -t s -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_r2s
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s s -t c -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_s2c
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s s -t p -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_s2p
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s s -t r -a resnet101 --bottleneck-dim 1024 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/DomainNet_s2r

# ResNet50, ImageNet200 -> ImageNetR
CUDA_VISIBLE_DEVICES=0 python bsp.py data/ImageNetR -d ImageNetR -s IN -t INR -a resnet50 --epochs 30 -i 2500 -p 500 --seed 0 --log logs/bsp/ImageNet_IN2INR

# ig_resnext101_32x8d, ImageNet -> ImageNetSketch
# NOTE: log dirs below were fixed from logs/dann_* (copy-paste from dann.sh) to bsp-named dirs
CUDA_VISIBLE_DEVICES=0 python bsp.py data/imagenet-sketch -d ImageNetSketch -s IN -t sketch -a ig_resnext101_32x8d --epochs 30 -i 2500 -p 500 --bottleneck-dim 1024 --log logs/bsp_ig_resnext101_32x8d/ImageNet_IN2sketch

# Vision Transformer, Office-Home, Single Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Pr -t Rw -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Ar -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Cl -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Rw -t Pr -a vit_base_patch16_224 --epochs 30 --seed 0 -b 24 --no-pool --log logs/bsp_vit/OfficeHome_Rw2Pr

# ResNet50, Office-Home, Multi Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Cl Pr Rw -t Ar -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_:2Ar
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar Pr Rw -t Cl -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_:2Cl
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar Cl Rw -t Pr -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_:2Pr
CUDA_VISIBLE_DEVICES=0 python bsp.py data/office-home -d OfficeHome -s Ar Cl Pr -t Rw -a resnet50 --epochs 30 --seed 0 --log logs/bsp/OfficeHome_:2Rw

# ResNet101, DomainNet, Multi Source
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s i p q r s -t c -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2c
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c p q r s -t i -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2i
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c i q r s -t p -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2p
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c i p r s -t q -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2q
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c i p q s -t r -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2r
CUDA_VISIBLE_DEVICES=0 python bsp.py data/domainnet -d DomainNet -s c i p q r -t s -a resnet101 --bottleneck-dim 1024 --epochs 40 -i 5000 -p 500 --seed 0 --log logs/bsp/DomainNet_:2s

# Digits
CUDA_VISIBLE_DEVICES=0 python bsp.py data/digits -d Digits -s MNIST -t USPS --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool --lr 0.01 --trade-off-bsp 0.0001 -b 128 -i 2500 --scratch --seed 0 --log logs/bsp/MNIST2USPS
CUDA_VISIBLE_DEVICES=0 python bsp.py data/digits -d Digits -s USPS -t MNIST --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 28 --no-hflip --norm-mean 0.5 --norm-std 0.5 -a lenet --no-pool --lr 0.1 --trade-off-bsp 0.0001 -b 128 -i 2500 --scratch --seed 0 --log logs/bsp/USPS2MNIST
CUDA_VISIBLE_DEVICES=0 python bsp.py data/digits -d Digits -s SVHNRGB -t MNISTRGB --train-resizing 'res.' --val-resizing 'res.' \
  --resize-size 32 --no-hflip --norm-mean 0.5 0.5 0.5 --norm-std 0.5 0.5 0.5 -a dtn --no-pool --lr 0.1 --trade-off-bsp 0.0001 -b 128 -i 2500 --scratch --seed 0 --log logs/bsp/SVHN2MNIST --pretrain-epochs 1


================================================
FILE: examples/domain_adaptation/image_classification/cc_loss.py
================================================
"""
@author: Ying Jin
@contact: sherryying003@gmail.com
"""
import random
import time
import warnings
import argparse
import shutil
import os.path as osp

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torch.nn.functional as F

import utils
from tllib.self_training.mcc import MinimumClassConfusionLoss, ImageClassifier
from tllib.self_training.cc_loss import CCConsistency
from tllib.vision.transforms import MultipleApply
from tllib.utils.data import ForeverDataIterator
from tllib.utils.metric import accuracy
from tllib.utils.meter import AverageMeter, ProgressMeter
from tllib.utils.logger import CompleteLogger
from tllib.utils.analysis import collect_feature, tsne, a_distance

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main(args: argparse.Namespace):
    """Entry point for CC-loss unsupervised domain adaptation.

    Depending on ``args.phase`` this trains the classifier (``'train'``),
    evaluates a saved best checkpoint (``'test'``), or extracts features to
    plot t-SNE and compute A-distance (``'analysis'``).
    """
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    # FIX: this transform previously hard-coded scale=(0.08, 1.0) and
    # ratio=(3/4, 4/3), silently ignoring the --scale/--ratio options that the
    # target transforms below honor. Use args.scale/args.ratio (same defaults).
    train_source_transform = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                                       random_horizontal_flip=not args.no_hflip,
                                                       random_color_jitter=False, resize_size=args.resize_size,
                                                       norm_mean=args.norm_mean, norm_std=args.norm_std)
    weak_augment = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                             random_horizontal_flip=not args.no_hflip,
                                             random_color_jitter=False, resize_size=args.resize_size,
                                             norm_mean=args.norm_mean, norm_std=args.norm_std)
    strong_augment = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                               random_horizontal_flip=not args.no_hflip,
                                               random_color_jitter=False, resize_size=args.resize_size,
                                               norm_mean=args.norm_mean, norm_std=args.norm_std,
                                               auto_augment=args.auto_augment)
    # each target image yields a (weak, strong) augmentation pair for consistency training
    train_target_transform = MultipleApply([weak_augment, strong_augment])
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_source_transform: ", train_source_transform)
    print("train_target_transform: ", train_target_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_source_transform, val_transform,
                          train_target_transform=train_target_transform)
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    # endless iterators: training runs a fixed number of iterations per epoch
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)

    # define optimizer and lr scheduler
    optimizer = SGD(classifier.get_parameters(), args.lr, momentum=args.momentum, weight_decay=args.weight_decay,
                    nesterov=True)
    # lr = args.lr * (1 + lr_gamma * iter) ** (-lr_decay), stepped per iteration
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analysis the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print("lr:", lr_scheduler.get_last_lr()[0])
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()


def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator,
          model: ImageClassifier, optimizer: SGD,
          lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Train ``model`` for one epoch.

    The objective is source cross-entropy plus two unsupervised target terms:
    Minimum Class Confusion (MCC) on weakly-augmented target logits, weighted
    by ``args.trade_off``, and a consistency loss between the weak and strong
    target augmentations, weighted by ``args.trade_off_consistency``.
    The lr scheduler is stepped once per iteration.
    """
    batch_time = AverageMeter('Time', ':3.1f')
    data_time = AverageMeter('Data', ':3.1f')
    losses = AverageMeter('Loss', ':3.2f')
    trans_losses = AverageMeter('Trans Loss', ':3.2f')
    cls_accs = AverageMeter('Cls Acc', ':3.1f')

    progress = ProgressMeter(
        args.iters_per_epoch,
        [batch_time, data_time, losses, trans_losses, cls_accs],
        prefix="Epoch: [{}]".format(epoch))

    # define loss function
    mcc = MinimumClassConfusionLoss(temperature=args.temperature)
    consistency = CCConsistency(temperature=args.temperature, thr=args.thr)

    # switch to train mode
    model.train()

    end = time.time()
    for i in range(args.iters_per_epoch):
        x_s, labels_s = next(train_source_iter)[:2]
        # target labels are unavailable in UDA; discard them
        (x_t, x_t_strong), _ = next(train_target_iter)[:2]

        x_s = x_s.to(device)
        x_t = x_t.to(device)
        x_t_strong = x_t_strong.to(device)
        labels_s = labels_s.to(device)

        # measure data loading time
        data_time.update(time.time() - end)

        # single forward pass over source, weak-target and strong-target batches
        x = torch.cat((x_s, x_t, x_t_strong), dim=0)
        y, _ = model(x)  # features are not needed here
        y_s, y_t, y_t_strong = y.chunk(3, dim=0)

        cls_loss = F.cross_entropy(y_s, labels_s)
        mcc_loss = mcc(y_t)
        # the selection ratio returned alongside the consistency loss is unused
        consistency_loss, _ = consistency(y_t, y_t_strong)
        # compute the weighted transfer term once and reuse it in the total loss
        transfer_loss = mcc_loss * args.trade_off + consistency_loss * args.trade_off_consistency
        loss = cls_loss + transfer_loss

        cls_acc = accuracy(y_s, labels_s)[0]

        losses.update(loss.item(), x_s.size(0))
        cls_accs.update(cls_acc.item(), x_s.size(0))
        trans_losses.update(transfer_loss.item(), x_s.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i)


if __name__ == '__main__':
    # Command line interface for CC-loss (MCC + consistency) training.
    parser = argparse.ArgumentParser(description='CC Loss for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('root', metavar='DIR',
                        help='root path of dataset')
    parser.add_argument('-d', '--data', metavar='DATA', default='Office31', choices=utils.get_dataset_names(),
                        help='dataset: ' + ' | '.join(utils.get_dataset_names()) +
                             ' (default: Office31)')
    parser.add_argument('-s', '--source', help='source domain(s)', nargs='+')
    parser.add_argument('-t', '--target', help='target domain(s)', nargs='+')
    parser.add_argument('--train-resizing', type=str, default='default')
    parser.add_argument('--val-resizing', type=str, default='default')
    parser.add_argument('--resize-size', type=int, default=224,
                        help='the image size after resizing')
    parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT',
                        help='Random resize scale (default: 0.08 1.0)')
    parser.add_argument('--ratio', type=float, nargs='+', default=[3. / 4., 4. / 3.], metavar='RATIO',
                        help='Random resize aspect ratio (default: 0.75 1.33)')
    parser.add_argument('--no-hflip', action='store_true',
                        help='no random horizontal flipping during training')
    parser.add_argument('--norm-mean', type=float, nargs='+',
                        default=(0.485, 0.456, 0.406), help='normalization mean')
    parser.add_argument('--norm-std', type=float, nargs='+',
                        default=(0.229, 0.224, 0.225), help='normalization std')
    parser.add_argument('--auto-augment', default='rand-m10-n2-mstd2', type=str,
                        help='AutoAugment policy (default: rand-m10-n2-mstd2)')
    # model parameters
    parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                        choices=utils.get_model_names(),
                        help='backbone architecture: ' +
                             ' | '.join(utils.get_model_names()) +
                             ' (default: resnet18)')
    parser.add_argument('--bottleneck-dim', default=256, type=int,
                        help='Dimension of bottleneck')
    parser.add_argument('--no-pool', action='store_true',
                        help='no pool layer after the feature extractor.')
    parser.add_argument('--scratch', action='store_true', help='whether train from scratch.')
    parser.add_argument('--temperature', default=2.5, type=float, help='parameter temperature scaling')
    parser.add_argument('--thr', default=0.95, type=float, help='thr parameter for consistency loss')
    parser.add_argument('--trade-off', default=1., type=float,
                        help='the trade-off hyper-parameter for original mcc loss')
    # FIX: hyphenated flag (consistent with every other option); the legacy
    # underscore spelling is kept as an alias for backward compatibility
    parser.add_argument('--trade-off-consistency', '--trade_off_consistency', dest='trade_off_consistency',
                        default=1., type=float,
                        help='the trade-off hyper-parameter for consistency loss')
    # training parameters
    parser.add_argument('-b', '--batch-size', default=36, type=int,
                        metavar='N',
                        help='mini-batch size (default: 36)')
    parser.add_argument('--lr', '--learning-rate', default=0.005, type=float,
                        metavar='LR', help='initial learning rate', dest='lr')
    parser.add_argument('--lr-gamma', default=0.001, type=float, help='parameter for lr scheduler')
    parser.add_argument('--lr-decay', default=0.75, type=float, help='parameter for lr scheduler')
    parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
    parser.add_argument('--wd', '--weight-decay', default=1e-3, type=float,
                        metavar='W', help='weight decay (default: 1e-3)',
                        dest='weight_decay')
    parser.add_argument('-j', '--workers', default=2, type=int, metavar='N',
                        help='number of data loading workers (default: 2)')
    parser.add_argument('--epochs', default=20, type=int, metavar='N',
                        help='number of total epochs to run')
    parser.add_argument('-i', '--iters-per-epoch', default=1000, type=int,
                        help='Number of iterations per epoch')
    parser.add_argument('-p', '--print-freq', default=100, type=int,
                        metavar='N', help='print frequency (default: 100)')
    parser.add_argument('--seed', default=None, type=int,
                        help='seed for initializing training. ')
    parser.add_argument('--per-class-eval', action='store_true',
                        help='whether output per-class accuracy during evaluation')
    # NOTE(review): default 'mcc' is inherited from mcc.py — kept for
    # backward compatibility with existing log directories
    parser.add_argument("--log", type=str, default='mcc',
                        help="Where to save logs, checkpoints and debugging images.")
    parser.add_argument("--phase", type=str, default='train', choices=['train', 'test', 'analysis'],
                        help="When phase is 'test', only test the model."
                             "When phase is 'analysis', only analysis the model.")
    args = parser.parse_args()
    main(args)


================================================
FILE: examples/domain_adaptation/image_classification/cc_loss.sh
================================================
#!/usr/bin/env bash
# Benchmark launcher for cc_loss.py (class-confusion loss, unsupervised domain
# adaptation). Each line is a single source->target transfer task with
# hyper-parameters tuned per dataset; commands run sequentially on one GPU.
# Logs and checkpoints go to the directory given by --log.

# ResNet50, Office31, Single Source
# A=Amazon, D=DSLR, W=Webcam; 500 iterations per epoch, 1024-d bottleneck.
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s A -t W -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_A2W
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s D -t W -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_D2W
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s W -t D -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_W2D
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s A -t D -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_A2D
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s D -t A -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_D2A
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office31 -s W -t A -a resnet50 --epochs 20 -i 500 --seed 2 --bottleneck-dim 1024 --log logs/cc_loss/Office31_W2A

# ResNet50, Office-Home, Single Source
# Ar=Art, Cl=Clipart, Pr=Product, Rw=Real-World; all 12 directed pairs.
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Ar -t Cl -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Ar2Cl
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Ar -t Pr -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Ar2Pr
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Ar -t Rw -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Ar2Rw
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Cl -t Ar -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Cl2Ar
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Cl -t Pr -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Cl2Pr
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Cl -t Rw -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Cl2Rw
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Pr -t Ar -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Pr2Ar
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Pr -t Cl -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Pr2Cl
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Pr -t Rw -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Pr2Rw
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Rw -t Ar -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Rw2Ar
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Rw -t Cl -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Rw2Cl
CUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office-home -d OfficeHome -s Rw -t Pr -a resnet50 --epochs 30 --seed 0 --bottleneck-dim 2048 --log logs/cc_loss/OfficeHome_Rw2Pr

# ResNet101, VisDA-2017, Single Source
# NOTE(review): this command pins GPU 5 while all others use GPU 0 — confirm
# intended before running the script end-to-end on a single-GPU machine.
CUDA_VISIBLE_DEVICES=5 python cc_loss.py data/visda-2017 -d VisDA2017 -s Synthetic -t Real -a resnet101 \
    --epochs 30 --seed 0 --lr 0.002 --per-class-eval --temperature 3.0 --train-resizing cen.crop --log logs/cc_loss/VisDA2017



================================================
FILE: examples/domain_adaptation/image_classification/cdan.py
================================================
"""
@author: Junguang Jiang
@contact: JiangJunguang1123@outlook.com
"""
import random
import time
import warnings
import argparse
import shutil
import os.path as osp

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
import torch.nn.functional as F

import utils
from tllib.modules.domain_discriminator import DomainDiscriminator
from tllib.alignment.cdan import ConditionalDomainAdversarialLoss, ImageClassifier
from tllib.utils.data import ForeverDataIterator
from tllib.utils.metric import accuracy
from tllib.utils.meter import AverageMeter, ProgressMeter
from tllib.utils.logger import CompleteLogger
from tllib.utils.analysis import collect_feature, tsne, a_distance

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def main(args: argparse.Namespace):
    """Entry point for CDAN training/evaluation/analysis.

    Dispatches on ``args.phase``:

    - ``'train'``: train for ``args.epochs`` epochs, keeping the best
      checkpoint by validation accuracy, then report test accuracy.
    - ``'test'``: load the best checkpoint and evaluate on the test set.
    - ``'analysis'``: load the best checkpoint, extract features from both
      domains, plot t-SNE and compute the A-distance.
    """
    logger = CompleteLogger(args.log, args.phase)
    print(args)

    # Optional full determinism: seed Python and torch RNGs and force
    # deterministic cuDNN kernels (slower, and noted below as potentially
    # surprising when restarting from checkpoints).
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    cudnn.benchmark = True

    # Data loading code
    # Training transform: resize/crop strategy from args, optional horizontal
    # flip, no color jitter; validation uses its own (deterministic) resizing.
    train_transform = utils.get_train_transform(args.train_resizing, scale=args.scale, ratio=args.ratio,
                                                random_horizontal_flip=not args.no_hflip,
                                                random_color_jitter=False, resize_size=args.resize_size,
                                                norm_mean=args.norm_mean, norm_std=args.norm_std)
    val_transform = utils.get_val_transform(args.val_resizing, resize_size=args.resize_size,
                                            norm_mean=args.norm_mean, norm_std=args.norm_std)
    print("train_transform: ", train_transform)
    print("val_transform: ", val_transform)

    train_source_dataset, train_target_dataset, val_dataset, test_dataset, num_classes, args.class_names = \
        utils.get_dataset(args.data, args.root, args.source, args.target, train_transform, val_transform)
    # drop_last=True keeps source/target batches the same size, which the
    # training loop relies on when concatenating and chunking batches.
    train_source_loader = DataLoader(train_source_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    train_target_loader = DataLoader(train_target_dataset, batch_size=args.batch_size,
                                     shuffle=True, num_workers=args.workers, drop_last=True)
    val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)
    test_loader = DataLoader(test_dataset, batch_size=args.batch_size, shuffle=False, num_workers=args.workers)

    # Infinite iterators so each epoch can run a fixed number of iterations
    # (args.iters_per_epoch) regardless of dataset length.
    train_source_iter = ForeverDataIterator(train_source_loader)
    train_target_iter = ForeverDataIterator(train_target_loader)

    # create model
    print("=> using model '{}'".format(args.arch))
    backbone = utils.get_model(args.arch, pretrain=not args.scratch)
    # --no-pool replaces the default pooling with identity (e.g. for ViT /
    # LeNet-style backbones that already emit flat features).
    pool_layer = nn.Identity() if args.no_pool else None
    classifier = ImageClassifier(backbone, num_classes, bottleneck_dim=args.bottleneck_dim,
                                 pool_layer=pool_layer, finetune=not args.scratch).to(device)
    classifier_feature_dim = classifier.features_dim

    # CDAN conditions the discriminator on (feature x prediction): with
    # randomized multilinear conditioning the input dim is randomized_dim,
    # otherwise it is the full outer-product dim features * num_classes.
    if args.randomized:
        domain_discri = DomainDiscriminator(args.randomized_dim, hidden_size=1024).to(device)
    else:
        domain_discri = DomainDiscriminator(classifier_feature_dim * num_classes, hidden_size=1024).to(device)

    all_parameters = classifier.get_parameters() + domain_discri.get_parameters()
    # define optimizer and lr scheduler
    optimizer = SGD(all_parameters, args.lr, momentum=args.momentum, weight_decay=args.weight_decay, nesterov=True)
    # Inverse-decay schedule: lr * (1 + gamma * step) ** (-decay), stepped per
    # iteration in train().
    lr_scheduler = LambdaLR(optimizer, lambda x: args.lr * (1. + args.lr_gamma * float(x)) ** (-args.lr_decay))

    # define loss function
    domain_adv = ConditionalDomainAdversarialLoss(
        domain_discri, entropy_conditioning=args.entropy,
        num_classes=num_classes, features_dim=classifier_feature_dim, randomized=args.randomized,
        randomized_dim=args.randomized_dim
    ).to(device)

    # resume from the best checkpoint
    if args.phase != 'train':
        checkpoint = torch.load(logger.get_checkpoint_path('best'), map_location='cpu')
        classifier.load_state_dict(checkpoint)

    # analysis the model
    if args.phase == 'analysis':
        # extract features from both domains
        feature_extractor = nn.Sequential(classifier.backbone, classifier.pool_layer, classifier.bottleneck).to(device)
        source_feature = collect_feature(train_source_loader, feature_extractor, device)
        target_feature = collect_feature(train_target_loader, feature_extractor, device)
        # plot t-SNE
        tSNE_filename = osp.join(logger.visualize_directory, 'TSNE.pdf')
        tsne.visualize(source_feature, target_feature, tSNE_filename)
        print("Saving t-SNE to", tSNE_filename)
        # calculate A-distance, which is a measure for distribution discrepancy
        A_distance = a_distance.calculate(source_feature, target_feature, device)
        print("A-distance =", A_distance)
        return

    if args.phase == 'test':
        acc1 = utils.validate(test_loader, classifier, args, device)
        print(acc1)
        return

    # start training
    best_acc1 = 0.
    for epoch in range(args.epochs):
        print("lr:", lr_scheduler.get_last_lr()[0])
        # train for one epoch
        train(train_source_iter, train_target_iter, classifier, domain_adv, optimizer,
              lr_scheduler, epoch, args)

        # evaluate on validation set
        acc1 = utils.validate(val_loader, classifier, args, device)

        # remember best acc@1 and save checkpoint
        # ('latest' is always overwritten; it is promoted to 'best' only when
        # validation accuracy strictly improves)
        torch.save(classifier.state_dict(), logger.get_checkpoint_path('latest'))
        if acc1 > best_acc1:
            shutil.copy(logger.get_checkpoint_path('latest'), logger.get_checkpoint_path('best'))
        best_acc1 = max(acc1, best_acc1)

    print("best_acc1 = {:3.1f}".format(best_acc1))

    # evaluate on test set
    classifier.load_state_dict(torch.load(logger.get_checkpoint_path('best')))
    acc1 = utils.validate(test_loader, classifier, args, device)
    print("test_acc1 = {:3.1f}".format(acc1))

    logger.close()


def train(train_source_iter: ForeverDataIterator, train_target_iter: ForeverDataIterator, model: ImageClassifier,
          domain_adv: ConditionalDomainAdversarialLoss, optimizer: SGD,
          lr_scheduler: LambdaLR, epoch: int, args: argparse.Namespace):
    """Run one training epoch of CDAN: classification loss on the source
    batch plus the conditional domain-adversarial loss on both domains."""
    # Running statistics displayed every args.print_freq iterations.
    meter_time = AverageMeter('Time', ':3.1f')
    meter_data = AverageMeter('Data', ':3.1f')
    meter_loss = AverageMeter('Loss', ':3.2f')
    meter_trans = AverageMeter('Trans Loss', ':3.2f')
    meter_cls_acc = AverageMeter('Cls Acc', ':3.1f')
    meter_dom_acc = AverageMeter('Domain Acc', ':3.1f')
    progress = ProgressMeter(
        args.iters_per_epoch,
        [meter_time, meter_data, meter_loss, meter_trans, meter_cls_acc, meter_dom_acc],
        prefix="Epoch: [{}]".format(epoch))

    # Both the classifier and the adversarial loss module hold trainable
    # parameters (the domain discriminator), so both go into train mode.
    model.train()
    domain_adv.train()

    tick = time.time()
    for step in range(args.iters_per_epoch):
        # Fetch one labeled source batch and one unlabeled target batch.
        x_s, labels_s = next(train_source_iter)[:2]
        x_t = next(train_target_iter)[0]

        x_s, x_t, labels_s = x_s.to(device), x_t.to(device), labels_s.to(device)

        # measure data loading time
        meter_data.update(time.time() - tick)

        # One forward pass over the concatenated batch, then split the
        # predictions/features back into their source and target halves.
        y, f = model(torch.cat((x_s, x_t), dim=0))
        y_s, y_t = y.chunk(2, dim=0)
        f_s, f_t = f.chunk(2, dim=0)

        cls_loss = F.cross_entropy(y_s, labels_s)
        transfer_loss = domain_adv(y_s, f_s, y_t, f_t)
        domain_acc = domain_adv.domain_discriminator_accuracy
        loss = cls_loss + transfer_loss * args.trade_off

        cls_acc = accuracy(y_s, labels_s)[0]

        n = x_s.size(0)
        meter_loss.update(loss.item(), n)
        meter_cls_acc.update(cls_acc, n)
        meter_dom_acc.update(domain_acc, n)
        meter_trans.update(transfer_loss.item(), n)

        # Backward pass and parameter/lr updates (scheduler steps per
        # iteration, not per epoch).
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        lr_scheduler.step()

        # measure elapsed time
        meter_time.update(time.time() - tick)
        tick = time.time()

        if step % args.print_freq == 0:
            progress.display(step)


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='CDAN for Unsupervised Domain Adaptation')
    # dataset parameters
    parser.add_argument('root', metavar='DIR',
                        help='root path of dataset')
    parser.add_argument('-d', '--data', metavar='DATA', default='Office31', choices=utils.get_dataset_names(),
                        help='dataset: ' + ' | '.join(utils.get_dataset_names()) +
                             ' (default: Office31)')
    parser.add_argument('-s', '--source', help='source domain(s)', nargs='+'
Download .txt
gitextract_h0zbpfvz/

├── .github/
│   └── ISSUE_TEMPLATE/
│       ├── bug_report.md
│       ├── custom.md
│       └── feature_request.md
├── .gitignore
├── CONTRIBUTING.md
├── DATASETS.md
├── LICENSE
├── README.md
├── docs/
│   ├── Makefile
│   ├── conf.py
│   ├── index.rst
│   ├── make.bat
│   ├── requirements.txt
│   └── tllib/
│       ├── alignment/
│       │   ├── domain_adversarial.rst
│       │   ├── hypothesis_adversarial.rst
│       │   ├── index.rst
│       │   └── statistics_matching.rst
│       ├── modules.rst
│       ├── normalization.rst
│       ├── ranking.rst
│       ├── regularization.rst
│       ├── reweight.rst
│       ├── self_training.rst
│       ├── translation.rst
│       ├── utils/
│       │   ├── analysis.rst
│       │   ├── base.rst
│       │   ├── index.rst
│       │   └── metric.rst
│       └── vision/
│           ├── datasets.rst
│           ├── index.rst
│           ├── models.rst
│           └── transforms.rst
├── examples/
│   ├── domain_adaptation/
│   │   ├── image_classification/
│   │   │   ├── README.md
│   │   │   ├── adda.py
│   │   │   ├── adda.sh
│   │   │   ├── afn.py
│   │   │   ├── afn.sh
│   │   │   ├── bsp.py
│   │   │   ├── bsp.sh
│   │   │   ├── cc_loss.py
│   │   │   ├── cc_loss.sh
│   │   │   ├── cdan.py
│   │   │   ├── cdan.sh
│   │   │   ├── dan.py
│   │   │   ├── dan.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fixmatch.py
│   │   │   ├── fixmatch.sh
│   │   │   ├── jan.py
│   │   │   ├── jan.sh
│   │   │   ├── mcc.py
│   │   │   ├── mcc.sh
│   │   │   ├── mcd.py
│   │   │   ├── mcd.sh
│   │   │   ├── mdd.py
│   │   │   ├── mdd.sh
│   │   │   ├── requirements.txt
│   │   │   ├── self_ensemble.py
│   │   │   ├── self_ensemble.sh
│   │   │   └── utils.py
│   │   ├── image_regression/
│   │   │   ├── README.md
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── dd.py
│   │   │   ├── dd.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── rsd.py
│   │   │   ├── rsd.sh
│   │   │   └── utils.py
│   │   ├── keypoint_detection/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── regda.py
│   │   │   ├── regda.sh
│   │   │   ├── regda_fast.py
│   │   │   └── regda_fast.sh
│   │   ├── object_detection/
│   │   │   ├── README.md
│   │   │   ├── config/
│   │   │   │   ├── faster_rcnn_R_101_C4_cityscapes.yaml
│   │   │   │   ├── faster_rcnn_R_101_C4_voc.yaml
│   │   │   │   ├── faster_rcnn_vgg_16_cityscapes.yaml
│   │   │   │   └── retinanet_R_101_FPN_voc.yaml
│   │   │   ├── cycle_gan.py
│   │   │   ├── cycle_gan.sh
│   │   │   ├── d_adapt/
│   │   │   │   ├── README.md
│   │   │   │   ├── bbox_adaptation.py
│   │   │   │   ├── category_adaptation.py
│   │   │   │   ├── config/
│   │   │   │   │   ├── faster_rcnn_R_101_C4_cityscapes.yaml
│   │   │   │   │   ├── faster_rcnn_R_101_C4_voc.yaml
│   │   │   │   │   ├── faster_rcnn_vgg_16_cityscapes.yaml
│   │   │   │   │   └── retinanet_R_101_FPN_voc.yaml
│   │   │   │   ├── d_adapt.py
│   │   │   │   └── d_adapt.sh
│   │   │   ├── oracle.sh
│   │   │   ├── prepare_cityscapes_to_voc.py
│   │   │   ├── requirements.txt
│   │   │   ├── source_only.py
│   │   │   ├── source_only.sh
│   │   │   ├── utils.py
│   │   │   ├── visualize.py
│   │   │   └── visualize.sh
│   │   ├── openset_domain_adaptation/
│   │   │   ├── README.md
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── osbp.py
│   │   │   ├── osbp.sh
│   │   │   └── utils.py
│   │   ├── partial_domain_adaptation/
│   │   │   ├── README.md
│   │   │   ├── afn.py
│   │   │   ├── afn.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── iwan.py
│   │   │   ├── iwan.sh
│   │   │   ├── pada.py
│   │   │   ├── pada.sh
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── re_identification/
│   │   │   ├── README.md
│   │   │   ├── baseline.py
│   │   │   ├── baseline.sh
│   │   │   ├── baseline_cluster.py
│   │   │   ├── baseline_cluster.sh
│   │   │   ├── ibn.sh
│   │   │   ├── mmt.py
│   │   │   ├── mmt.sh
│   │   │   ├── requirements.txt
│   │   │   ├── spgan.py
│   │   │   ├── spgan.sh
│   │   │   └── utils.py
│   │   ├── semantic_segmentation/
│   │   │   ├── README.md
│   │   │   ├── advent.py
│   │   │   ├── advent.sh
│   │   │   ├── cycada.py
│   │   │   ├── cycada.sh
│   │   │   ├── cycle_gan.py
│   │   │   ├── cycle_gan.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fda.py
│   │   │   └── fda.sh
│   │   ├── wilds_image_classification/
│   │   │   ├── README.md
│   │   │   ├── cdan.py
│   │   │   ├── cdan.sh
│   │   │   ├── dan.py
│   │   │   ├── dan.sh
│   │   │   ├── dann.py
│   │   │   ├── dann.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── fixmatch.py
│   │   │   ├── fixmatch.sh
│   │   │   ├── jan.py
│   │   │   ├── jan.sh
│   │   │   ├── mdd.py
│   │   │   ├── mdd.sh
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── wilds_ogb_molpcba/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── gin.py
│   │   │   ├── requirements.txt
│   │   │   └── utils.py
│   │   ├── wilds_poverty/
│   │   │   ├── README.md
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── requirements.txt
│   │   │   ├── resnet_ms.py
│   │   │   └── utils.py
│   │   └── wilds_text/
│   │       ├── README.md
│   │       ├── erm.py
│   │       ├── erm.sh
│   │       ├── requirements.txt
│   │       └── utils.py
│   ├── domain_generalization/
│   │   ├── image_classification/
│   │   │   ├── README.md
│   │   │   ├── coral.py
│   │   │   ├── coral.sh
│   │   │   ├── erm.py
│   │   │   ├── erm.sh
│   │   │   ├── groupdro.py
│   │   │   ├── groupdro.sh
│   │   │   ├── ibn.sh
│   │   │   ├── irm.py
│   │   │   ├── irm.sh
│   │   │   ├── mixstyle.py
│   │   │   ├── mixstyle.sh
│   │   │   ├── mldg.py
│   │   │   ├── mldg.sh
│   │   │   ├── requirements.txt
│   │   │   ├── utils.py
│   │   │   ├── vrex.py
│   │   │   └── vrex.sh
│   │   └── re_identification/
│   │       ├── README.md
│   │       ├── baseline.py
│   │       ├── baseline.sh
│   │       ├── ibn.sh
│   │       ├── mixstyle.py
│   │       ├── mixstyle.sh
│   │       ├── requirements.txt
│   │       └── utils.py
│   ├── model_selection/
│   │   ├── README.md
│   │   ├── hscore.py
│   │   ├── hscore.sh
│   │   ├── leep.py
│   │   ├── leep.sh
│   │   ├── logme.py
│   │   ├── logme.sh
│   │   ├── nce.py
│   │   ├── nce.sh
│   │   ├── requirements.txt
│   │   └── utils.py
│   ├── semi_supervised_learning/
│   │   └── image_classification/
│   │       ├── README.md
│   │       ├── convert_moco_to_pretrained.py
│   │       ├── debiasmatch.py
│   │       ├── debiasmatch.sh
│   │       ├── dst.py
│   │       ├── dst.sh
│   │       ├── erm.py
│   │       ├── erm.sh
│   │       ├── fixmatch.py
│   │       ├── fixmatch.sh
│   │       ├── flexmatch.py
│   │       ├── flexmatch.sh
│   │       ├── mean_teacher.py
│   │       ├── mean_teacher.sh
│   │       ├── noisy_student.py
│   │       ├── noisy_student.sh
│   │       ├── pi_model.py
│   │       ├── pi_model.sh
│   │       ├── pseudo_label.py
│   │       ├── pseudo_label.sh
│   │       ├── requirements.txt
│   │       ├── self_tuning.py
│   │       ├── self_tuning.sh
│   │       ├── uda.py
│   │       ├── uda.sh
│   │       └── utils.py
│   └── task_adaptation/
│       └── image_classification/
│           ├── README.md
│           ├── bi_tuning.py
│           ├── bi_tuning.sh
│           ├── bss.py
│           ├── bss.sh
│           ├── co_tuning.py
│           ├── co_tuning.sh
│           ├── convert_moco_to_pretrained.py
│           ├── delta.py
│           ├── delta.sh
│           ├── erm.py
│           ├── erm.sh
│           ├── lwf.py
│           ├── lwf.sh
│           ├── requirements.txt
│           ├── stochnorm.py
│           ├── stochnorm.sh
│           └── utils.py
├── requirements.txt
├── setup.py
└── tllib/
    ├── __init__.py
    ├── alignment/
    │   ├── __init__.py
    │   ├── adda.py
    │   ├── advent.py
    │   ├── bsp.py
    │   ├── cdan.py
    │   ├── coral.py
    │   ├── d_adapt/
    │   │   ├── __init__.py
    │   │   ├── feedback.py
    │   │   ├── modeling/
    │   │   │   ├── __init__.py
    │   │   │   ├── matcher.py
    │   │   │   ├── meta_arch/
    │   │   │   │   ├── __init__.py
    │   │   │   │   ├── rcnn.py
    │   │   │   │   └── retinanet.py
    │   │   │   └── roi_heads/
    │   │   │       ├── __init__.py
    │   │   │       ├── fast_rcnn.py
    │   │   │       └── roi_heads.py
    │   │   └── proposal.py
    │   ├── dan.py
    │   ├── dann.py
    │   ├── jan.py
    │   ├── mcd.py
    │   ├── mdd.py
    │   ├── osbp.py
    │   ├── regda.py
    │   └── rsd.py
    ├── modules/
    │   ├── __init__.py
    │   ├── classifier.py
    │   ├── domain_discriminator.py
    │   ├── entropy.py
    │   ├── gl.py
    │   ├── grl.py
    │   ├── kernels.py
    │   ├── loss.py
    │   └── regressor.py
    ├── normalization/
    │   ├── __init__.py
    │   ├── afn.py
    │   ├── ibn.py
    │   ├── mixstyle/
    │   │   ├── __init__.py
    │   │   ├── resnet.py
    │   │   └── sampler.py
    │   └── stochnorm.py
    ├── ranking/
    │   ├── __init__.py
    │   ├── hscore.py
    │   ├── leep.py
    │   ├── logme.py
    │   ├── nce.py
    │   └── transrate.py
    ├── regularization/
    │   ├── __init__.py
    │   ├── bi_tuning.py
    │   ├── bss.py
    │   ├── co_tuning.py
    │   ├── delta.py
    │   ├── knowledge_distillation.py
    │   └── lwf.py
    ├── reweight/
    │   ├── __init__.py
    │   ├── groupdro.py
    │   ├── iwan.py
    │   └── pada.py
    ├── self_training/
    │   ├── __init__.py
    │   ├── cc_loss.py
    │   ├── dst.py
    │   ├── flexmatch.py
    │   ├── mcc.py
    │   ├── mean_teacher.py
    │   ├── pi_model.py
    │   ├── pseudo_label.py
    │   ├── self_ensemble.py
    │   ├── self_tuning.py
    │   └── uda.py
    ├── translation/
    │   ├── __init__.py
    │   ├── cycada.py
    │   ├── cyclegan/
    │   │   ├── __init__.py
    │   │   ├── discriminator.py
    │   │   ├── generator.py
    │   │   ├── loss.py
    │   │   ├── transform.py
    │   │   └── util.py
    │   ├── fourier_transform.py
    │   └── spgan/
    │       ├── __init__.py
    │       ├── loss.py
    │       └── siamese.py
    ├── utils/
    │   ├── __init__.py
    │   ├── analysis/
    │   │   ├── __init__.py
    │   │   ├── a_distance.py
    │   │   └── tsne.py
    │   ├── data.py
    │   ├── logger.py
    │   ├── meter.py
    │   ├── metric/
    │   │   ├── __init__.py
    │   │   ├── keypoint_detection.py
    │   │   └── reid.py
    │   └── scheduler.py
    └── vision/
        ├── __init__.py
        ├── datasets/
        │   ├── __init__.py
        │   ├── _util.py
        │   ├── aircrafts.py
        │   ├── caltech101.py
        │   ├── cifar.py
        │   ├── coco70.py
        │   ├── cub200.py
        │   ├── digits.py
        │   ├── domainnet.py
        │   ├── dtd.py
        │   ├── eurosat.py
        │   ├── food101.py
        │   ├── imagelist.py
        │   ├── imagenet_r.py
        │   ├── imagenet_sketch.py
        │   ├── keypoint_detection/
        │   │   ├── __init__.py
        │   │   ├── freihand.py
        │   │   ├── hand_3d_studio.py
        │   │   ├── human36m.py
        │   │   ├── keypoint_dataset.py
        │   │   ├── lsp.py
        │   │   ├── rendered_hand_pose.py
        │   │   ├── surreal.py
        │   │   └── util.py
        │   ├── object_detection/
        │   │   └── __init__.py
        │   ├── office31.py
        │   ├── officecaltech.py
        │   ├── officehome.py
        │   ├── openset/
        │   │   └── __init__.py
        │   ├── oxfordflowers.py
        │   ├── oxfordpets.py
        │   ├── pacs.py
        │   ├── partial/
        │   │   ├── __init__.py
        │   │   ├── caltech_imagenet.py
        │   │   └── imagenet_caltech.py
        │   ├── patchcamelyon.py
        │   ├── regression/
        │   │   ├── __init__.py
        │   │   ├── dsprites.py
        │   │   ├── image_regression.py
        │   │   └── mpi3d.py
        │   ├── reid/
        │   │   ├── __init__.py
        │   │   ├── basedataset.py
        │   │   ├── convert.py
        │   │   ├── dukemtmc.py
        │   │   ├── market1501.py
        │   │   ├── msmt17.py
        │   │   ├── personx.py
        │   │   └── unreal.py
        │   ├── resisc45.py
        │   ├── retinopathy.py
        │   ├── segmentation/
        │   │   ├── __init__.py
        │   │   ├── cityscapes.py
        │   │   ├── gta5.py
        │   │   ├── segmentation_list.py
        │   │   └── synthia.py
        │   ├── stanford_cars.py
        │   ├── stanford_dogs.py
        │   ├── sun397.py
        │   └── visda2017.py
        ├── models/
        │   ├── __init__.py
        │   ├── digits.py
        │   ├── keypoint_detection/
        │   │   ├── __init__.py
        │   │   ├── loss.py
        │   │   └── pose_resnet.py
        │   ├── object_detection/
        │   │   ├── __init__.py
        │   │   ├── backbone/
        │   │   │   ├── __init__.py
        │   │   │   ├── mmdetection/
        │   │   │   │   ├── vgg.py
        │   │   │   │   └── weight_init.py
        │   │   │   └── vgg.py
        │   │   ├── meta_arch/
        │   │   │   ├── __init__.py
        │   │   │   ├── rcnn.py
        │   │   │   └── retinanet.py
        │   │   ├── proposal_generator/
        │   │   │   ├── __init__.py
        │   │   │   └── rpn.py
        │   │   └── roi_heads/
        │   │       ├── __init__.py
        │   │       └── roi_heads.py
        │   ├── reid/
        │   │   ├── __init__.py
        │   │   ├── identifier.py
        │   │   ├── loss.py
        │   │   └── resnet.py
        │   ├── resnet.py
        │   └── segmentation/
        │       ├── __init__.py
        │       └── deeplabv2.py
        └── transforms/
            ├── __init__.py
            ├── keypoint_detection.py
            └── segmentation.py
Download .txt
SYMBOL INDEX (1435 symbols across 252 files)

FILE: examples/domain_adaptation/image_classification/adda.py
  function set_requires_grad (line 36) | def set_requires_grad(net, requires_grad=False):
  function main (line 44) | def main(args: argparse.Namespace):
  function train (line 186) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/afn.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 132) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/bsp.py
  function main (line 33) | def main(args: argparse.Namespace):
  function train (line 167) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/cc_loss.py
  function main (line 33) | def main(args: argparse.Namespace):
  function train (line 147) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/cdan.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 147) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/dan.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 138) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/dann.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 138) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/erm.py
  function main (line 27) | def main(args):

FILE: examples/domain_adaptation/image_classification/fixmatch.py
  class ImageClassifier (line 33) | class ImageClassifier(Classifier):
    method __init__ (line 34) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 42) | def forward(self, x: torch.Tensor):
  function main (line 50) | def main(args: argparse.Namespace):
  function train (line 165) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/jan.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 149) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/mcc.py
  function main (line 31) | def main(args: argparse.Namespace):
  function train (line 136) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/mcd.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 147) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...
  function validate (line 240) | def validate(val_loader: DataLoader, G: nn.Module, F1: ImageClassifierHead,

FILE: examples/domain_adaptation/image_classification/mdd.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 135) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/self_ensemble.py
  function main (line 35) | def main(args: argparse.Namespace):
  function train (line 176) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_classification/utils.py
  function get_model_names (line 26) | def get_model_names():
  function get_model (line 34) | def get_model(model_name, pretrain=True):
  function get_dataset_names (line 50) | def get_dataset_names():
  function get_dataset (line 57) | def get_dataset(dataset_name, root, source, target, train_source_transfo...
  function validate (line 96) | def validate(val_loader, model, args, device) -> float:
  function get_train_transform (line 144) | def get_train_transform(resizing='default', scale=(0.08, 1.0), ratio=(3....
  function get_val_transform (line 196) | def get_val_transform(resizing='default', resize_size=224,
  function empirical_risk_minimization (line 219) | def empirical_risk_minimization(train_source_iter, model, optimizer, lr_...

FILE: examples/domain_adaptation/image_regression/dann.py
  function main (line 35) | def main(args: argparse.Namespace):
  function train (line 156) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_regression/dd.py
  function main (line 33) | def main(args: argparse.Namespace):
  function train (line 187) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_regression/erm.py
  function main (line 33) | def main(args: argparse.Namespace):
  function train (line 141) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_regression/rsd.py
  function main (line 34) | def main(args: argparse.Namespace):
  function train (line 153) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/image_regression/utils.py
  function convert_model (line 16) | def convert_model(module):
  function validate (line 31) | def validate(val_loader, model, args, factors, device):

FILE: examples/domain_adaptation/keypoint_detection/erm.py
  function main (line 34) | def main(args: argparse.Namespace):
  function train (line 169) | def train(train_source_iter, train_target_iter, model, criterion,
  function validate (line 222) | def validate(val_loader, model, criterion, visualize, args: argparse.Nam...

FILE: examples/domain_adaptation/keypoint_detection/regda.py
  function main (line 36) | def main(args: argparse.Namespace):
  function pretrain (line 227) | def pretrain(train_source_iter, model, criterion, optimizer,
  function train (line 277) | def train(train_source_iter, train_target_iter, model, criterion,regress...
  function validate (line 379) | def validate(val_loader, model, criterion, visualize, args: argparse.Nam...

FILE: examples/domain_adaptation/keypoint_detection/regda_fast.py
  function main (line 36) | def main(args: argparse.Namespace):
  function pretrain (line 227) | def pretrain(train_source_iter, model, criterion, optimizer,
  function train (line 277) | def train(train_source_iter, train_target_iter, model, criterion,regress...
  function validate (line 379) | def validate(val_loader, model, criterion, visualize, args: argparse.Nam...

FILE: examples/domain_adaptation/object_detection/cycle_gan.py
  function make_power_2 (line 43) | def make_power_2(img, base, method=Image.BICUBIC):
  class VOCImageFolder (line 52) | class VOCImageFolder(datasets.VisionDataset):
    method __init__ (line 56) | def __init__(self, root: str, phase='trainval',
    method __getitem__ (line 64) | def __getitem__(self, index: int) -> Tuple[Any, str]:
    method __len__ (line 76) | def __len__(self) -> int:
    method parse_data_file (line 79) | def parse_data_file(self, file_name: str, extension: str) -> List[str]:
    method translate (line 99) | def translate(self, transform: Callable, target_root: str, image_base=4):
  function main (line 119) | def main(args):
  function train (line 247) | def train(train_source_iter, train_target_iter, netG_S2T, netG_T2S, netD...
  function build_dataset (line 346) | def build_dataset(dataset_names, dataset_roots, transform):

FILE: examples/domain_adaptation/object_detection/d_adapt/bbox_adaptation.py
  class BoxTransform (line 36) | class BoxTransform(nn.Module):
    method __init__ (line 37) | def __init__(self):
    method forward (line 42) | def forward(self, pred_delta, gt_classes, proposal_boxes):
  function iou_between (line 59) | def iou_between(
  function clamp_single (line 92) | def clamp_single(box, w, h):
  function clamp (line 101) | def clamp(boxes, widths, heights):
  class BoundingBoxAdaptor (line 109) | class BoundingBoxAdaptor:
    method __init__ (line 110) | def __init__(self, class_names, log, args):
    method load_checkpoint (line 157) | def load_checkpoint(self, path=None):
    method prepare_training_data (line 167) | def prepare_training_data(self, proposal_list: PersistentProposalList,...
    method prepare_validation_data (line 197) | def prepare_validation_data(self, proposal_list: PersistentProposalList):
    method prepare_test_data (line 218) | def prepare_test_data(self, proposal_list: PersistentProposalList):
    method predict (line 231) | def predict(self, data_loader):
    method validate_baseline (line 250) | def validate_baseline(self, val_loader):
    method validate (line 263) | def validate(val_loader, model, box_transform, args) -> float:
    method fit (line 300) | def fit(self, data_loader_source, data_loader_target, data_loader_vali...
    method get_parser (line 482) | def get_parser() -> argparse.ArgumentParser:

FILE: examples/domain_adaptation/object_detection/d_adapt/category_adaptation.py
  class ConfidenceBasedDataSelector (line 41) | class ConfidenceBasedDataSelector:
    method __init__ (line 43) | def __init__(self, confidence_ratio=0.1, category_names=()):
    method extend (line 50) | def extend(self, categories, scores):
    method calculate (line 54) | def calculate(self):
    method whether_select (line 74) | def whether_select(self, categories, scores):
  class RobustCrossEntropyLoss (line 79) | class RobustCrossEntropyLoss(nn.CrossEntropyLoss):
    method __init__ (line 81) | def __init__(self, *args, offset=0.1, **kwargs):
    method forward (line 85) | def forward(self, input: Tensor, target: Tensor) -> Tensor:
  class CategoryAdaptor (line 90) | class CategoryAdaptor:
    method __init__ (line 91) | def __init__(self, class_names, log, args):
    method load_checkpoint (line 108) | def load_checkpoint(self):
    method prepare_training_data (line 116) | def prepare_training_data(self, proposal_list: List[Proposal], labeled...
    method prepare_validation_data (line 157) | def prepare_validation_data(self, proposal_list: List[Proposal]):
    method prepare_test_data (line 178) | def prepare_test_data(self, proposal_list: List[Proposal]):
    method fit (line 191) | def fit(self, data_loader_source, data_loader_target, data_loader_vali...
    method predict (line 318) | def predict(self, data_loader):
    method validate (line 335) | def validate(val_loader, model, class_names, args) -> float:
    method get_parser (line 377) | def get_parser() -> argparse.ArgumentParser:

FILE: examples/domain_adaptation/object_detection/d_adapt/d_adapt.py
  function generate_proposals (line 39) | def generate_proposals(model, num_classes, dataset_names, cache_root, cfg):
  function generate_category_labels (line 55) | def generate_category_labels(prop, category_adaptor, cache_filename):
  function generate_bounding_box_labels (line 70) | def generate_bounding_box_labels(prop, bbox_adaptor, class_names, cache_...
  function train (line 87) | def train(model, logger, cfg, args, args_cls, args_box):
  function main (line 251) | def main(args, args_cls, args_box):

FILE: examples/domain_adaptation/object_detection/prepare_cityscapes_to_voc.py
  function make_dir (line 16) | def make_dir(path):
  function polygon_to_bbox (line 25) | def polygon_to_bbox(polygon):
  function read_json (line 33) | def read_json(file):
  function save_xml (line 61) | def save_xml(img_path, img_shape, data, save_path):
  function prepare_cityscapes_to_voc (line 68) | def prepare_cityscapes_to_voc(cityscapes_dir, save_path, suffix, image_d...

FILE: examples/domain_adaptation/object_detection/source_only.py
  function train (line 29) | def train(model, logger, cfg, args):
  function main (line 117) | def main(args):

FILE: examples/domain_adaptation/object_detection/utils.py
  class PascalVOCDetectionPerClassEvaluator (line 37) | class PascalVOCDetectionPerClassEvaluator(PascalVOCDetectionEvaluator):
    method evaluate (line 48) | def evaluate(self):
  function validate (line 98) | def validate(model, logger, cfg, args):
  function build_dataset (line 116) | def build_dataset(dataset_categories, dataset_roots):
  function rgb2gray (line 127) | def rgb2gray(rgb):
  class Grayscale (line 131) | class Grayscale(Augmentation):
    method __init__ (line 132) | def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):
    method get_transform (line 137) | def get_transform(self, image):
  function build_augmentation (line 141) | def build_augmentation(cfg, is_train):
  function setup (line 182) | def setup(args):
  function build_lr_scheduler (line 196) | def build_lr_scheduler(
  function get_model_names (line 234) | def get_model_names():
  function get_model (line 242) | def get_model(model_name, pretrain=True):
  class VisualizerWithoutAreaSorting (line 258) | class VisualizerWithoutAreaSorting(Visualizer):
    method __init__ (line 265) | def __init__(self, *args, flip=False, **kwargs):
    method overlay_instances (line 269) | def overlay_instances(
    method draw_box (line 398) | def draw_box(self, box_coord, alpha=1, edge_color="g", line_style="-"):

FILE: examples/domain_adaptation/object_detection/visualize.py
  function visualize (line 26) | def visualize(cfg, args, model):
  function setup (line 78) | def setup(args):
  function main (line 92) | def main(args):

FILE: examples/domain_adaptation/openset_domain_adaptation/dann.py
  function main (line 34) | def main(args: argparse.Namespace):
  function train (line 136) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...
  function validate (line 200) | def validate(val_loader: DataLoader, model: Classifier, args: argparse.N...

FILE: examples/domain_adaptation/openset_domain_adaptation/erm.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 128) | def train(train_source_iter: ForeverDataIterator, model: Classifier, opt...
  function validate (line 177) | def validate(val_loader: DataLoader, model: Classifier, args: argparse.N...

FILE: examples/domain_adaptation/openset_domain_adaptation/osbp.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 130) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...
  function validate (line 191) | def validate(val_loader: DataLoader, model: Classifier, args: argparse.N...

FILE: examples/domain_adaptation/openset_domain_adaptation/utils.py
  function get_model_names (line 13) | def get_model_names():
  function get_model (line 21) | def get_model(model_name):
  function get_dataset_names (line 39) | def get_dataset_names():
  function get_dataset (line 46) | def get_dataset(dataset_name, root, source, target, train_source_transfo...
  function get_train_transform (line 66) | def get_train_transform(resizing='default', random_horizontal_flip=True,...
  function get_val_transform (line 127) | def get_val_transform(resizing='default'):

FILE: examples/domain_adaptation/partial_domain_adaptation/afn.py
  function main (line 34) | def main(args: argparse.Namespace):
  function train (line 132) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/partial_domain_adaptation/dann.py
  function main (line 35) | def main(args: argparse.Namespace):
  function train (line 140) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/partial_domain_adaptation/erm.py
  function main (line 33) | def main(args: argparse.Namespace):
  function train (line 128) | def train(train_source_iter: ForeverDataIterator, model: Classifier, opt...

FILE: examples/domain_adaptation/partial_domain_adaptation/iwan.py
  function main (line 37) | def main(args: argparse.Namespace):
  function train (line 147) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/partial_domain_adaptation/pada.py
  function main (line 36) | def main(args: argparse.Namespace):
  function train (line 142) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/partial_domain_adaptation/utils.py
  function get_model_names (line 18) | def get_model_names():
  function get_model (line 26) | def get_model(model_name):
  function get_dataset_names (line 44) | def get_dataset_names():
  function get_dataset (line 51) | def get_dataset(dataset_name, root, source, target, train_source_transfo...
  function validate (line 70) | def validate(val_loader, model, args, device) -> float:
  function get_train_transform (line 117) | def get_train_transform(resizing='default', random_horizontal_flip=True,...
  function get_val_transform (line 173) | def get_val_transform(resizing='default'):

FILE: examples/domain_adaptation/re_identification/baseline.py
  function main (line 35) | def main(args: argparse.Namespace):
  function train (line 173) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/re_identification/baseline_cluster.py
  function main (line 36) | def main(args: argparse.Namespace):
  function run_kmeans (line 168) | def run_kmeans(cluster_loader: DataLoader, model: DataParallel, target_d...
  function run_dbscan (line 199) | def run_dbscan(cluster_loader: DataLoader, model: DataParallel, target_d...
  function train (line 243) | def train(train_target_iter: ForeverDataIterator, model, optimizer, crit...

FILE: examples/domain_adaptation/re_identification/mmt.py
  function main (line 37) | def main(args: argparse.Namespace):
  function create_model (line 179) | def create_model(args: argparse.Namespace, pretrained_model_path: str):
  function run_kmeans (line 195) | def run_kmeans(cluster_loader: DataLoader, model_1: DataParallel, model_...
  function run_dbscan (line 237) | def run_dbscan(cluster_loader: DataLoader, model_1: DataParallel, model_...
  function train (line 297) | def train(train_target_iter: ForeverDataIterator, model_1: DataParallel,...

FILE: examples/domain_adaptation/re_identification/spgan.py
  function main (line 36) | def main(args):
  function train (line 192) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...

FILE: examples/domain_adaptation/re_identification/utils.py
  function copy_state_dict (line 21) | def copy_state_dict(model, state_dict, strip=None):
  function get_model_names (line 47) | def get_model_names():
  function get_model (line 55) | def get_model(model_name):
  function get_train_transform (line 74) | def get_train_transform(height, width, resizing='default', random_horizo...
  function get_val_transform (line 108) | def get_val_transform(height, width):
  function visualize_tsne (line 116) | def visualize_tsne(source_loader, target_loader, model, filename, device...
  function k_reciprocal_neigh (line 134) | def k_reciprocal_neigh(initial_rank, i, k1):
  function compute_rerank_dist (line 144) | def compute_rerank_dist(target_features, k1=30, k2=6):

FILE: examples/domain_adaptation/semantic_segmentation/advent.py
  function main (line 36) | def main(args: argparse.Namespace):
  function train (line 184) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...
  function validate (line 276) | def validate(val_loader: DataLoader, model, interp, criterion, visualize...

FILE: examples/domain_adaptation/semantic_segmentation/cycada.py
  function main (line 36) | def main(args):
  function train (line 198) | def train(train_source_iter, train_target_iter, netG_S2T, netG_T2S, netD...

FILE: examples/domain_adaptation/semantic_segmentation/cycle_gan.py
  function main (line 34) | def main(args):
  function train (line 168) | def train(train_source_iter, train_target_iter, netG_S2T, netG_T2S, netD...

FILE: examples/domain_adaptation/semantic_segmentation/erm.py
  function main (line 34) | def main(args: argparse.Namespace):
  function train (line 161) | def train(train_source_iter: ForeverDataIterator, model, interp, criteri...
  function validate (line 217) | def validate(val_loader: DataLoader, model, interp, criterion, visualize...

FILE: examples/domain_adaptation/semantic_segmentation/fda.py
  function robust_entropy (line 39) | def robust_entropy(y, ita=1.5, num_classes=19, reduction='mean'):
  function main (line 71) | def main(args: argparse.Namespace):
  function train (line 215) | def train(train_source_iter: ForeverDataIterator, train_target_iter: For...
  function validate (line 296) | def validate(val_loader: DataLoader, model, interp, criterion, visualize...

FILE: examples/domain_adaptation/wilds_image_classification/cdan.py
  function main (line 42) | def main(args):
  function train (line 230) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/dan.py
  function main (line 42) | def main(args):
  function train (line 221) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/dann.py
  function main (line 42) | def main(args):
  function train (line 223) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/erm.py
  function main (line 41) | def main(args):
  function train (line 213) | def train(train_loader, model, criterion, optimizer, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_image_classification/fixmatch.py
  class ImageClassifier (line 43) | class ImageClassifier(Classifier):
    method __init__ (line 44) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 52) | def forward(self, x: torch.Tensor):
  function main (line 60) | def main(args):
  function train (line 247) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/jan.py
  function main (line 43) | def main(args):
  function train (line 225) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/mdd.py
  function main (line 42) | def main(args):
  function train (line 216) | def train(train_labeled_loader, train_unlabeled_loader, model, criterion...

FILE: examples/domain_adaptation/wilds_image_classification/utils.py
  function get_model_names (line 31) | def get_model_names():
  function get_model (line 35) | def get_model(model_name, pretrain=True):
  function get_dataset (line 47) | def get_dataset(dataset_name, root, unlabeled_list=("test_unlabeled",), ...
  function collate_list (line 82) | def collate_list(vec):
  function get_train_transform (line 106) | def get_train_transform(img_size, scale=None, ratio=None, hflip=0.5, vfl...
  function get_val_transform (line 154) | def get_val_transform(img_size=224, crop_pct=None, interpolation='biline...
  function _pil_interp (line 175) | def _pil_interp(method):
  function validate (line 187) | def validate(val_dataset, model, epoch, writer, args):
  function reduce_tensor (line 267) | def reduce_tensor(tensor, world_size):
  function matplotlib_imshow (line 274) | def matplotlib_imshow(img):
  function plot_classes_preds (line 281) | def plot_classes_preds(images, labels, outputs, class_names, metadata, m...
  function get_domain_names (line 309) | def get_domain_names(metadata, metadata_map):
  function get_domain_ids (line 313) | def get_domain_ids(metadata):

FILE: examples/domain_adaptation/wilds_ogb_molpcba/erm.py
  function main (line 21) | def main(args):
  function train (line 103) | def train(train_loader, model, criterion, optimizer, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_ogb_molpcba/gin.py
  class GINVirtual (line 16) | class GINVirtual(torch.nn.Module):
    method __init__ (line 33) | def __init__(self, num_tasks=128, num_layers=5, emb_dim=300, dropout=0...
    method forward (line 58) | def forward(self, batched_data):
  class GINVirtualNode (line 69) | class GINVirtualNode(torch.nn.Module):
    method __init__ (line 85) | def __init__(self, num_layers, emb_dim, dropout=0.5):
    method forward (line 118) | def forward(self, batched_data):
  class GINConv (line 155) | class GINConv(MessagePassing):
    method __init__ (line 170) | def __init__(self, emb_dim):
    method forward (line 179) | def forward(self, x, edge_index, edge_attr):
    method message (line 185) | def message(self, x_j, edge_attr):
    method update (line 188) | def update(self, aggr_out):
  function gin_virtual (line 192) | def gin_virtual(num_tasks, dropout=0.5):

FILE: examples/domain_adaptation/wilds_ogb_molpcba/utils.py
  function reduced_bce_logit_loss (line 19) | def reduced_bce_logit_loss(y_pred, y_target):
  function get_dataset (line 33) | def get_dataset(dataset_name, root, unlabeled_list=('test_unlabeled',), ...
  function get_model_names (line 71) | def get_model_names():
  function get_model (line 76) | def get_model(arch, num_classes):
  function collate_list (line 84) | def collate_list(vec):
  function validate (line 108) | def validate(val_dataset, model, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_poverty/erm.py
  function main (line 32) | def main(args):
  function train (line 179) | def train(train_loader, model, optimizer, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_poverty/resnet_ms.py
  class ResNetMS (line 14) | class ResNetMS(models.ResNet):
    method __init__ (line 19) | def __init__(self, in_channels, *args, **kwargs):
    method forward (line 26) | def forward(self, x):
    method out_features (line 43) | def out_features(self) -> int:
    method copy_head (line 47) | def copy_head(self) -> nn.Module:
  function resnet18_ms (line 52) | def resnet18_ms(num_channels=3):
  function resnet34_ms (line 57) | def resnet34_ms(num_channels=3):
  function resnet50_ms (line 62) | def resnet50_ms(num_channels=3):
  function resnet101_ms (line 67) | def resnet101_ms(num_channels=3):
  function resnet152_ms (line 72) | def resnet152_ms(num_channels=3):

FILE: examples/domain_adaptation/wilds_poverty/utils.py
  class Regressor (line 22) | class Regressor(nn.Module):
    method __init__ (line 56) | def __init__(self, backbone: nn.Module, bottleneck: Optional[nn.Module...
    method features_dim (line 82) | def features_dim(self) -> int:
    method forward (line 86) | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    method get_parameters (line 96) | def get_parameters(self, base_lr=1.0) -> List[Dict]:
  function get_dataset (line 109) | def get_dataset(dataset_name, root, unlabeled_list=("test_unlabeled",), ...
  function get_model_names (line 143) | def get_model_names():
  function get_model (line 148) | def get_model(arch, num_channels):
  function collate_list (line 156) | def collate_list(vec):
  function reduce_tensor (line 180) | def reduce_tensor(tensor, world_size):
  function validate (line 187) | def validate(val_dataset, model, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_text/erm.py
  function main (line 28) | def main(args):
  function train (line 133) | def train(train_loader, model, criterion, optimizer, epoch, writer, args):

FILE: examples/domain_adaptation/wilds_text/utils.py
  class DistilBertClassifier (line 20) | class DistilBertClassifier(DistilBertForSequenceClassification):
    method __call__ (line 25) | def __call__(self, x):
  function get_transform (line 35) | def get_transform(arch, max_token_length):
  function get_dataset (line 64) | def get_dataset(dataset_name, root, unlabeled_list=('extra_unlabeled',),...
  function get_model_names (line 99) | def get_model_names():
  function get_model (line 103) | def get_model(arch, num_classes):
  function reduce_tensor (line 111) | def reduce_tensor(tensor, world_size):
  function collate_list (line 118) | def collate_list(vec):
  function validate (line 142) | def validate(val_dataset, model, epoch, writer, args):

FILE: examples/domain_generalization/image_classification/coral.py
  function main (line 31) | def main(args: argparse.Namespace):
  function train (line 144) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/image_classification/erm.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 138) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/image_classification/groupdro.py
  function main (line 32) | def main(args: argparse.Namespace):
  function train (line 143) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/image_classification/irm.py
  class InvariancePenaltyLoss (line 32) | class InvariancePenaltyLoss(nn.Module):
    method __init__ (line 48) | def __init__(self):
    method forward (line 52) | def forward(self, y: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
  function main (line 61) | def main(args: argparse.Namespace):
  function train (line 182) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/image_classification/mixstyle.py
  function main (line 31) | def main(args: argparse.Namespace):
  function train (line 141) | def train(train_iter: ForeverDataIterator, model, optimizer,

FILE: examples/domain_generalization/image_classification/mldg.py
  function main (line 31) | def main(args: argparse.Namespace):
  function random_split (line 140) | def random_split(x_list, labels_list, n_domains_per_batch, n_support_dom...
  function train (line 152) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/image_classification/utils.py
  function get_model_names (line 28) | def get_model_names():
  function get_model (line 36) | def get_model(model_name):
  function get_dataset_names (line 55) | def get_dataset_names():
  class ConcatDatasetWithDomainLabel (line 62) | class ConcatDatasetWithDomainLabel(ConcatDataset):
    method __init__ (line 65) | def __init__(self, *args, **kwargs):
    method __getitem__ (line 76) | def __getitem__(self, index):
  function get_dataset (line 82) | def get_dataset(dataset_name, root, task_list, split='train', download=T...
  function split_dataset (line 131) | def split_dataset(dataset, n, seed=0):
  function validate (line 145) | def validate(val_loader, model, args, device) -> float:
  function get_train_transform (line 184) | def get_train_transform(resizing='default', random_horizontal_flip=True,...
  function get_val_transform (line 246) | def get_val_transform(resizing='default'):
  function collect_feature (line 271) | def collect_feature(data_loader, feature_extractor: nn.Module, device: t...
  class ImageClassifier (line 299) | class ImageClassifier(ClassifierBase):
    method __init__ (line 311) | def __init__(self, backbone: nn.Module, num_classes: int, freeze_bn=Fa...
    method forward (line 317) | def forward(self, x: torch.Tensor):
    method train (line 328) | def train(self, mode=True):
  class RandomDomainSampler (line 336) | class RandomDomainSampler(Sampler):
    method __init__ (line 346) | def __init__(self, data_source: ConcatDataset, batch_size: int, n_doma...
    method __iter__ (line 363) | def __iter__(self):
    method __len__ (line 389) | def __len__(self):

FILE: examples/domain_generalization/image_classification/vrex.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 147) | def train(train_iter: ForeverDataIterator, model, optimizer, lr_schedule...

FILE: examples/domain_generalization/re_identification/baseline.py
  function main (line 35) | def main(args: argparse.Namespace):
  function train (line 169) | def train(train_iter: ForeverDataIterator, model, criterion_ce: CrossEnt...

FILE: examples/domain_generalization/re_identification/mixstyle.py
  function main (line 37) | def main(args: argparse.Namespace):
  function train (line 172) | def train(train_iter: ForeverDataIterator, model, criterion_ce: CrossEnt...

FILE: examples/domain_generalization/re_identification/utils.py
  function get_model_names (line 18) | def get_model_names():
  function get_model (line 26) | def get_model(model_name):
  function get_train_transform (line 45) | def get_train_transform(height, width, resizing='default', random_horizo...
  function get_val_transform (line 77) | def get_val_transform(height, width):
  function visualize_tsne (line 85) | def visualize_tsne(source_loader, target_loader, model, filename, device...

FILE: examples/model_selection/hscore.py
  function main (line 23) | def main(args):

FILE: examples/model_selection/leep.py
  function main (line 23) | def main(args):

FILE: examples/model_selection/logme.py
  function main (line 23) | def main(args):

FILE: examples/model_selection/nce.py
  function main (line 23) | def main(args):

FILE: examples/model_selection/utils.py
  class Logger (line 19) | class Logger(object):
    method __init__ (line 27) | def __init__(self, data_name, model_name, metric_name, stream=sys.stdo...
    method write (line 34) | def write(self, message):
    method get_save_dir (line 39) | def get_save_dir(self):
    method get_result_dir (line 42) | def get_result_dir(self):
    method flush (line 45) | def flush(self):
    method close (line 49) | def close(self):
  function get_model_names (line 54) | def get_model_names():
  function forwarding_dataset (line 62) | def forwarding_dataset(score_loader, model, layer, device):
  function get_model (line 102) | def get_model(model_name, pretrained=True, pretrained_checkpoint=None):
  function get_dataset (line 116) | def get_dataset(dataset_name, root, transform, sample_rate=100, num_samp...
  function get_transform (line 140) | def get_transform(resizing='res.'):

FILE: examples/semi_supervised_learning/image_classification/debiasmatch.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 131) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/dst.py
  function main (line 31) | def main(args: argparse.Namespace):
  function train (line 129) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/erm.py
  function main (line 29) | def main(args: argparse.Namespace):

FILE: examples/semi_supervised_learning/image_classification/fixmatch.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 128) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/flexmatch.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 135) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/mean_teacher.py
  function main (line 31) | def main(args: argparse.Namespace):
  function train (line 130) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/noisy_student.py
  class ImageClassifier (line 32) | class ImageClassifier(Classifier):
    method __init__ (line 33) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 45) | def forward(self, x: torch.Tensor):
  function calc_teacher_output (line 55) | def calc_teacher_output(classifier_teacher: ImageClassifier, weak_augmen...
  function main (line 89) | def main(args: argparse.Namespace):
  function train (line 213) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/pi_model.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 128) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/pseudo_label.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 128) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/self_tuning.py
  function main (line 29) | def main(args: argparse.Namespace):
  function train (line 129) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/uda.py
  function main (line 30) | def main(args: argparse.Namespace):
  function train (line 128) | def train(labeled_train_iter: ForeverDataIterator, unlabeled_train_iter:...

FILE: examples/semi_supervised_learning/image_classification/utils.py
  function get_model_names (line 28) | def get_model_names():
  function get_model (line 36) | def get_model(model_name, pretrained=True, pretrained_checkpoint=None):
  function get_dataset_names (line 56) | def get_dataset_names():
  function get_dataset (line 63) | def get_dataset(dataset_name, num_samples_per_class, root, labeled_train...
  function x_u_split (line 100) | def x_u_split(num_samples_per_class, num_classes, labels, seed):
  function get_train_transform (line 121) | def get_train_transform(resizing='default', random_horizontal_flip=True,...
  function get_val_transform (line 152) | def get_val_transform(resizing='default', norm_mean=(0.485, 0.456, 0.406...
  function convert_dataset (line 169) | def convert_dataset(dataset):
  class ImageClassifier (line 188) | class ImageClassifier(Classifier):
    method __init__ (line 189) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 200) | def forward(self, x: torch.Tensor):
  function get_cosine_scheduler_with_warmup (line 207) | def get_cosine_scheduler_with_warmup(optimizer, T_max, num_cycles=7. / 1...
  function validate (line 234) | def validate(val_loader, model, args, device, num_classes):
  function empirical_risk_minimization (line 281) | def empirical_risk_minimization(labeled_train_iter, model, optimizer, lr...

FILE: examples/task_adaptation/image_classification/bi_tuning.py
  function main (line 28) | def main(args: argparse.Namespace):
  function train (line 110) | def train(train_iter: ForeverDataIterator, bituning: BiTuning, optimizer...

FILE: examples/task_adaptation/image_classification/bss.py
  function main (line 29) | def main(args: argparse.Namespace):
  function train (line 99) | def train(train_iter: ForeverDataIterator, model: Classifier, bss_module...

FILE: examples/task_adaptation/image_classification/co_tuning.py
  function get_dataset (line 31) | def get_dataset(dataset_name, root, train_transform, val_transform, samp...
  function main (line 54) | def main(args: argparse.Namespace):
  function train (line 132) | def train(train_iter: ForeverDataIterator, model: Classifier, optimizer:...

FILE: examples/task_adaptation/image_classification/delta.py
  function main (line 32) | def main(args: argparse.Namespace):
  function calculate_channel_attention (line 142) | def calculate_channel_attention(dataset, return_layers, num_classes, args):
  function train (line 233) | def train(train_iter: ForeverDataIterator, model: Classifier, backbone_r...

FILE: examples/task_adaptation/image_classification/erm.py
  function main (line 28) | def main(args: argparse.Namespace):
  function train (line 99) | def train(train_iter: ForeverDataIterator, model: Classifier, optimizer:...

FILE: examples/task_adaptation/image_classification/lwf.py
  function main (line 29) | def main(args: argparse.Namespace):
  function train (line 105) | def train(train_iter: ForeverDataIterator, model: Classifier, kd, optimi...

FILE: examples/task_adaptation/image_classification/stochnorm.py
  function main (line 29) | def main(args: argparse.Namespace):
  function train (line 101) | def train(train_iter: ForeverDataIterator, model: Classifier, optimizer:...

FILE: examples/task_adaptation/image_classification/utils.py
  function get_model_names (line 27) | def get_model_names():
  function get_model (line 35) | def get_model(model_name, pretrained_checkpoint=None):
  function get_dataset (line 57) | def get_dataset(dataset_name, root, train_transform, val_transform, samp...
  function validate (line 82) | def validate(val_loader, model, args, device, visualize=None) -> float:
  function get_train_transform (line 123) | def get_train_transform(resizing='default', random_horizontal_flip=True,...
  function get_val_transform (line 170) | def get_val_transform(resizing='default'):
  function get_optimizer (line 199) | def get_optimizer(optimizer_name, params, lr, wd, momentum):
  function visualize (line 219) | def visualize(image, filename):

FILE: tllib/alignment/adda.py
  class DomainAdversarialLoss (line 12) | class DomainAdversarialLoss(nn.Module):
    method __init__ (line 31) | def __init__(self):
    method forward (line 34) | def forward(self, domain_pred, domain_label='source'):
  class ImageClassifier (line 42) | class ImageClassifier(ClassifierBase):
    method __init__ (line 43) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method freeze_bn (line 53) | def freeze_bn(self):
    method get_parameters (line 58) | def get_parameters(self, base_lr=1.0, optimize_head=True) -> List[Dict]:

FILE: tllib/alignment/advent.py
  class Discriminator (line 11) | class Discriminator(nn.Sequential):
    method __init__ (line 27) | def __init__(self, num_classes, ndf=64):
  function prob_2_entropy (line 41) | def prob_2_entropy(prob):
  function bce_loss (line 48) | def bce_loss(y_pred, y_label):
  class DomainAdversarialEntropyLoss (line 55) | class DomainAdversarialEntropyLoss(nn.Module):
    method __init__ (line 82) | def __init__(self, discriminator: nn.Module):
    method forward (line 86) | def forward(self, logits, domain_label='source'):
    method train (line 98) | def train(self, mode=True):
    method eval (line 110) | def eval(self):

FILE: tllib/alignment/bsp.py
  class BatchSpectralPenalizationLoss (line 11) | class BatchSpectralPenalizationLoss(nn.Module):
    method __init__ (line 43) | def __init__(self):
    method forward (line 46) | def forward(self, f_s, f_t):
  class ImageClassifier (line 53) | class ImageClassifier(ClassifierBase):
    method __init__ (line 54) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/alignment/cdan.py
  class ConditionalDomainAdversarialLoss (line 20) | class ConditionalDomainAdversarialLoss(nn.Module):
    method __init__ (line 80) | def __init__(self, domain_discriminator: nn.Module, entropy_conditioni...
    method forward (line 101) | def forward(self, g_s: torch.Tensor, f_s: torch.Tensor, g_t: torch.Ten...
  class RandomizedMultiLinearMap (line 133) | class RandomizedMultiLinearMap(nn.Module):
    method __init__ (line 155) | def __init__(self, features_dim: int, num_classes: int, output_dim: Op...
    method forward (line 161) | def forward(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
  class MultiLinearMap (line 168) | class MultiLinearMap(nn.Module):
    method __init__ (line 177) | def __init__(self):
    method forward (line 180) | def forward(self, f: torch.Tensor, g: torch.Tensor) -> torch.Tensor:
  class ImageClassifier (line 186) | class ImageClassifier(ClassifierBase):
    method __init__ (line 187) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/alignment/coral.py
  class CorrelationAlignmentLoss (line 9) | class CorrelationAlignmentLoss(nn.Module):
    method __init__ (line 37) | def __init__(self):
    method forward (line 40) | def forward(self, f_s: torch.Tensor, f_t: torch.Tensor) -> torch.Tensor:

FILE: tllib/alignment/d_adapt/feedback.py
  function load_feedbacks_into_dataset (line 24) | def load_feedbacks_into_dataset(dataset_dicts, proposals_list: List[Prop...
  function get_detection_dataset_dicts (line 66) | def get_detection_dataset_dicts(names, filter_empty=True, min_keypoints=...
  function transform_feedbacks (line 111) | def transform_feedbacks(dataset_dict, image_shape, transforms, *, min_bo...
  class DatasetMapper (line 165) | class DatasetMapper:
    method __init__ (line 183) | def __init__(
    method from_config (line 231) | def from_config(cls, cfg, is_train: bool = True):
    method __call__ (line 260) | def __call__(self, dataset_dict):

FILE: tllib/alignment/d_adapt/modeling/matcher.py
  class MaxOverlapMatcher (line 11) | class MaxOverlapMatcher(object):
    method __init__ (line 18) | def __init__(self):
    method __call__ (line 21) | def __call__(self, match_quality_matrix):

FILE: tllib/alignment/d_adapt/modeling/meta_arch/rcnn.py
  class DecoupledGeneralizedRCNN (line 19) | class DecoupledGeneralizedRCNN(TLGeneralizedRCNN):
    method __init__ (line 60) | def __init__(self, *args, **kwargs):
    method forward (line 63) | def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]], labe...
    method visualize_training (line 93) | def visualize_training(self, batched_inputs, proposals, feedbacks=None):
    method inference (line 148) | def inference(

FILE: tllib/alignment/d_adapt/modeling/meta_arch/retinanet.py
  class DecoupledRetinaNet (line 23) | class DecoupledRetinaNet(TLRetinaNet):
    method __init__ (line 92) | def __init__(self, *args, max_samples_per_level=25, **kwargs):
    method forward_training (line 97) | def forward_training(self, images, features, predictions, gt_instances...
    method forward (line 112) | def forward(self, batched_inputs: Tuple[Dict[str, Tensor]], labeled=Tr...
    method label_pseudo_anchors (line 156) | def label_pseudo_anchors(self, anchors, instances):
    method sample_background (line 199) | def sample_background(
    method sample_background_single_image (line 217) | def sample_background_single_image(
    method visualize_training (line 255) | def visualize_training(self, batched_inputs, results, feedbacks=None):

FILE: tllib/alignment/d_adapt/modeling/roi_heads/fast_rcnn.py
  function label_smoothing_cross_entropy (line 18) | def label_smoothing_cross_entropy(input, target, *, reduction="mean", **...
  class DecoupledFastRCNNOutputLayers (line 28) | class DecoupledFastRCNNOutputLayers(FastRCNNOutputLayers):
    method losses (line 38) | def losses(self, predictions, proposals):

FILE: tllib/alignment/d_adapt/modeling/roi_heads/roi_heads.py
  class DecoupledRes5ROIHeads (line 27) | class DecoupledRes5ROIHeads(Res5ROIHeads):
    method __init__ (line 40) | def __init__(self, *args, **kwargs):
    method from_config (line 44) | def from_config(cls, cfg, input_shape):
    method forward (line 52) | def forward(self, images, features, proposals, targets=None, feedbacks...
    method label_and_sample_feedbacks (line 148) | def label_and_sample_feedbacks(
  class DecoupledStandardROIHeads (line 211) | class DecoupledStandardROIHeads(StandardROIHeads):
    method __init__ (line 226) | def __init__(self, *args, **kwargs):
    method from_config (line 230) | def from_config(cls, cfg, input_shape):
    method forward (line 237) | def forward(self, images, features, proposals, targets=None, feedbacks...
    method _forward_box (line 323) | def _forward_box(self, features: Dict[str, torch.Tensor], proposals: L...
    method label_and_sample_feedbacks (line 362) | def label_and_sample_feedbacks(
  function fast_rcnn_sample_background (line 421) | def fast_rcnn_sample_background(
  function fast_rcnn_sample_background_single_image (line 463) | def fast_rcnn_sample_background_single_image(

FILE: tllib/alignment/d_adapt/proposal.py
  class ProposalMapper (line 24) | class ProposalMapper(DatasetMapper):
    method __call__ (line 40) | def __call__(self, dataset_dict):
  class ProposalGenerator (line 91) | class ProposalGenerator(DatasetEvaluator):
    method __init__ (line 99) | def __init__(self, iou_threshold=(0.4, 0.5), num_classes=20, *args, **...
    method process_type (line 106) | def process_type(self, inputs, outputs, type='instances'):
    method process (line 143) | def process(self, inputs, outputs):
    method evaluate (line 147) | def evaluate(self):
  class Proposal (line 151) | class Proposal:
    method __init__ (line 167) | def __init__(self, image_id, filename, pred_boxes, pred_classes, pred_...
    method to_dict (line 179) | def to_dict(self):
    method __str__ (line 193) | def __str__(self):
    method __len__ (line 197) | def __len__(self):
    method __getitem__ (line 200) | def __getitem__(self, item):
  class ProposalEncoder (line 214) | class ProposalEncoder(json.JSONEncoder):
    method default (line 215) | def default(self, obj):
  function asProposal (line 221) | def asProposal(dict):
  class PersistentProposalList (line 237) | class PersistentProposalList(list):
    method __init__ (line 244) | def __init__(self, filename=None):
    method load (line 248) | def load(self):
    method flush (line 263) | def flush(self):
  function flatten (line 273) | def flatten(proposal_list, max_number=10000):
  class ProposalDataset (line 289) | class ProposalDataset(datasets.VisionDataset):
    method __init__ (line 299) | def __init__(self, proposal_list: List[Proposal], transform: Optional[...
    method __getitem__ (line 305) | def __getitem__(self, index: int):
    method __len__ (line 340) | def __len__(self):
  class ExpandCrop (line 344) | class ExpandCrop:
    method __init__ (line 349) | def __init__(self, expand=1.):
    method __call__ (line 352) | def __call__(self, img, top, left, height, width):

FILE: tllib/alignment/dan.py
  class MultipleKernelMaximumMeanDiscrepancy (line 15) | class MultipleKernelMaximumMeanDiscrepancy(nn.Module):
    method __init__ (line 72) | def __init__(self, kernels: Sequence[nn.Module], linear: Optional[bool...
    method forward (line 78) | def forward(self, z_s: torch.Tensor, z_t: torch.Tensor) -> torch.Tensor:
  function _update_index_matrix (line 92) | def _update_index_matrix(batch_size: int, index_matrix: Optional[torch.T...
  class ImageClassifier (line 122) | class ImageClassifier(ClassifierBase):
    method __init__ (line 123) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/alignment/dann.py
  class DomainAdversarialLoss (line 17) | class DomainAdversarialLoss(nn.Module):
    method __init__ (line 59) | def __init__(self, domain_discriminator: nn.Module, reduction: Optiona...
    method forward (line 70) | def forward(self, f_s: torch.Tensor, f_t: torch.Tensor,
  class ImageClassifier (line 110) | class ImageClassifier(ClassifierBase):
    method __init__ (line 111) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/alignment/jan.py
  class JointMultipleKernelMaximumMeanDiscrepancy (line 19) | class JointMultipleKernelMaximumMeanDiscrepancy(nn.Module):
    method __init__ (line 69) | def __init__(self, kernels: Sequence[Sequence[nn.Module]], linear: Opt...
    method forward (line 79) | def forward(self, z_s: torch.Tensor, z_t: torch.Tensor) -> torch.Tensor:
  class Theta (line 96) | class Theta(nn.Module):
    method __init__ (line 101) | def __init__(self, dim: int):
    method forward (line 109) | def forward(self, features: torch.Tensor) -> torch.Tensor:
  class ImageClassifier (line 114) | class ImageClassifier(ClassifierBase):
    method __init__ (line 115) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/alignment/mcd.py
  function classifier_discrepancy (line 10) | def classifier_discrepancy(predictions1: torch.Tensor, predictions2: tor...
  function entropy (line 28) | def entropy(predictions: torch.Tensor) -> torch.Tensor:
  class ImageClassifierHead (line 46) | class ImageClassifierHead(nn.Module):
    method __init__ (line 59) | def __init__(self, in_features: int, num_classes: int, bottleneck_dim:...
    method forward (line 81) | def forward(self, inputs: torch.Tensor) -> torch.Tensor:

FILE: tllib/alignment/mdd.py
  class MarginDisparityDiscrepancy (line 13) | class MarginDisparityDiscrepancy(nn.Module):
    method __init__ (line 60) | def __init__(self, source_disparity: Callable, target_disparity: Calla...
    method forward (line 68) | def forward(self, y_s: torch.Tensor, y_s_adv: torch.Tensor, y_t: torch...
  class ClassificationMarginDisparityDiscrepancy (line 88) | class ClassificationMarginDisparityDiscrepancy(MarginDisparityDiscrepancy):
    method __init__ (line 142) | def __init__(self, margin: Optional[float] = 4, **kwargs):
  class RegressionMarginDisparityDiscrepancy (line 155) | class RegressionMarginDisparityDiscrepancy(MarginDisparityDiscrepancy):
    method __init__ (line 209) | def __init__(self, margin: Optional[float] = 1, loss_function=F.l1_los...
  function shift_log (line 220) | def shift_log(x: torch.Tensor, offset: Optional[float] = 1e-6) -> torch....
  class GeneralModule (line 239) | class GeneralModule(nn.Module):
    method __init__ (line 240) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck: ...
    method forward (line 253) | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    method step (line 265) | def step(self):
    method get_parameters (line 271) | def get_parameters(self, base_lr=1.0) -> List[Dict]:
  class ImageClassifier (line 285) | class ImageClassifier(GeneralModule):
    method __init__ (line 321) | def __init__(self, backbone: nn.Module, num_classes: int,
  class ImageRegressor (line 365) | class ImageRegressor(GeneralModule):
    method __init__ (line 400) | def __init__(self, backbone: nn.Module, num_factors: int, bottleneck =...

FILE: tllib/alignment/osbp.py
  class UnknownClassBinaryCrossEntropy (line 14) | class UnknownClassBinaryCrossEntropy(nn.Module):
    method __init__ (line 38) | def __init__(self, t: Optional[float]=0.5):
    method forward (line 42) | def forward(self, y):
  class ImageClassifier (line 54) | class ImageClassifier(ClassifierBase):
    method __init__ (line 55) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 71) | def forward(self, x: torch.Tensor, grad_reverse: Optional[bool] = False):

FILE: tllib/alignment/regda.py
  class FastPseudoLabelGenerator2d (line 15) | class FastPseudoLabelGenerator2d(nn.Module):
    method __init__ (line 16) | def __init__(self, sigma=2):
    method forward (line 20) | def forward(self, heatmap: torch.Tensor):
  class PseudoLabelGenerator2d (line 34) | class PseudoLabelGenerator2d(nn.Module):
    method __init__ (line 57) | def __init__(self, num_keypoints, height=64, width=64, sigma=2):
    method forward (line 93) | def forward(self, y):
  class RegressionDisparity (line 105) | class RegressionDisparity(nn.Module):
    method __init__ (line 145) | def __init__(self, pseudo_label_generator: PseudoLabelGenerator2d, cri...
    method forward (line 150) | def forward(self, y, y_adv, weight=None, mode='min'):
  class PoseResNet2d (line 161) | class PoseResNet2d(nn.Module):
    method __init__ (line 192) | def __init__(self, backbone, upsampling, feature_dim, num_keypoints,
    method _make_head (line 203) | def _make_head(num_layers, channel_dim, num_keypoints):
    method forward (line 227) | def forward(self, x):
    method get_parameters (line 239) | def get_parameters(self, lr=1.):
    method step (line 247) | def step(self):

FILE: tllib/alignment/rsd.py
  class RepresentationSubspaceDistance (line 9) | class RepresentationSubspaceDistance(nn.Module):
    method __init__ (line 22) | def __init__(self, trade_off=0.1):
    method forward (line 26) | def forward(self, f_s, f_t):

FILE: tllib/modules/classifier.py
  class Classifier (line 12) | class Classifier(nn.Module):
    method __init__ (line 47) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck: ...
    method features_dim (line 74) | def features_dim(self) -> int:
    method forward (line 78) | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    method get_parameters (line 88) | def get_parameters(self, base_lr=1.0) -> List[Dict]:
  class ImageClassifier (line 101) | class ImageClassifier(Classifier):

FILE: tllib/modules/domain_discriminator.py
  class DomainDiscriminator (line 11) | class DomainDiscriminator(nn.Sequential):
    method __init__ (line 29) | def __init__(self, in_feature: int, hidden_size: int, batch_norm=True,...
    method get_parameters (line 58) | def get_parameters(self) -> List[Dict]:

FILE: tllib/modules/entropy.py
  function entropy (line 8) | def entropy(predictions: torch.Tensor, reduction='none') -> torch.Tensor:

FILE: tllib/modules/gl.py
  class GradientFunction (line 12) | class GradientFunction(Function):
    method forward (line 15) | def forward(ctx: Any, input: torch.Tensor, coeff: Optional[float] = 1....
    method backward (line 21) | def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[torch.Tenso...
  class WarmStartGradientLayer (line 25) | class WarmStartGradientLayer(nn.Module):
    method __init__ (line 51) | def __init__(self, alpha: Optional[float] = 1.0, lo: Optional[float] =...
    method forward (line 61) | def forward(self, input: torch.Tensor) -> torch.Tensor:
    method step (line 71) | def step(self):

FILE: tllib/modules/grl.py
  class GradientReverseFunction (line 12) | class GradientReverseFunction(Function):
    method forward (line 15) | def forward(ctx: Any, input: torch.Tensor, coeff: Optional[float] = 1....
    method backward (line 21) | def backward(ctx: Any, grad_output: torch.Tensor) -> Tuple[torch.Tenso...
  class GradientReverseLayer (line 25) | class GradientReverseLayer(nn.Module):
    method __init__ (line 26) | def __init__(self):
    method forward (line 29) | def forward(self, *input):
  class WarmStartGradientReverseLayer (line 33) | class WarmStartGradientReverseLayer(nn.Module):
    method __init__ (line 59) | def __init__(self, alpha: Optional[float] = 1.0, lo: Optional[float] =...
    method forward (line 69) | def forward(self, input: torch.Tensor) -> torch.Tensor:
    method step (line 79) | def step(self):

FILE: tllib/modules/kernels.py
  class GaussianKernel (line 13) | class GaussianKernel(nn.Module):
    method __init__ (line 48) | def __init__(self, sigma: Optional[float] = None, track_running_stats:...
    method forward (line 56) | def forward(self, X: torch.Tensor) -> torch.Tensor:

FILE: tllib/modules/loss.py
  class LabelSmoothSoftmaxCEV1 (line 7) | class LabelSmoothSoftmaxCEV1(nn.Module):
    method __init__ (line 12) | def __init__(self, lb_smooth=0.1, reduction='mean', ignore_index=-1):
    method forward (line 19) | def forward(self, input, target):
  class KnowledgeDistillationLoss (line 50) | class KnowledgeDistillationLoss(nn.Module):
    method __init__ (line 69) | def __init__(self, T=1., reduction='batchmean'):
    method forward (line 74) | def forward(self, y_student, y_teacher):

FILE: tllib/modules/regressor.py
  class Regressor (line 12) | class Regressor(nn.Module):
    method __init__ (line 41) | def __init__(self, backbone: nn.Module, num_factors: int, bottleneck: ...
    method features_dim (line 71) | def features_dim(self) -> int:
    method forward (line 75) | def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
    method get_parameters (line 85) | def get_parameters(self, base_lr=1.0) -> List[Dict]:

FILE: tllib/normalization/afn.py
  class AdaptiveFeatureNorm (line 14) | class AdaptiveFeatureNorm(nn.Module):
    method __init__ (line 49) | def __init__(self, delta):
    method forward (line 53) | def forward(self, f: torch.Tensor) -> torch.Tensor:
  class Block (line 61) | class Block(nn.Module):
    method __init__ (line 82) | def __init__(self, in_features: int, bottleneck_dim: Optional[int] = 1...
    method forward (line 90) | def forward(self, x: torch.Tensor) -> torch.Tensor:
  class ImageClassifier (line 100) | class ImageClassifier(ClassfierBase):
    method __init__ (line 112) | def __init__(self, backbone: nn.Module, num_classes: int, num_blocks: ...
    method get_parameters (line 135) | def get_parameters(self, base_lr=1.0) -> List[Dict]:

FILE: tllib/normalization/ibn.py
  class InstanceBatchNorm2d (line 25) | class InstanceBatchNorm2d(nn.Module):
    method __init__ (line 41) | def __init__(self, planes, ratio=0.5):
    method forward (line 47) | def forward(self, x):
  class BasicBlock (line 55) | class BasicBlock(nn.Module):
    method __init__ (line 58) | def __init__(self, inplanes, planes, ibn=None, stride=1, downsample=No...
    method forward (line 73) | def forward(self, x):
  class Bottleneck (line 94) | class Bottleneck(nn.Module):
    method __init__ (line 97) | def __init__(self, inplanes, planes, ibn=None, stride=1, downsample=No...
    method forward (line 114) | def forward(self, x):
  class IBNNet (line 139) | class IBNNet(nn.Module):
    method __init__ (line 144) | def __init__(self, block, layers, ibn_cfg=('a', 'a', 'a', None)):
    method _make_layer (line 169) | def _make_layer(self, block, planes, blocks, stride=1, ibn=None):
    method forward (line 189) | def forward(self, x):
    method out_features (line 204) | def out_features(self) -> int:
  function resnet18_ibn_a (line 209) | def resnet18_ibn_a(pretrained=False):
  function resnet34_ibn_a (line 223) | def resnet34_ibn_a(pretrained=False):
  function resnet50_ibn_a (line 237) | def resnet50_ibn_a(pretrained=False):
  function resnet101_ibn_a (line 251) | def resnet101_ibn_a(pretrained=False):
  function resnet18_ibn_b (line 265) | def resnet18_ibn_b(pretrained=False):
  function resnet34_ibn_b (line 279) | def resnet34_ibn_b(pretrained=False):
  function resnet50_ibn_b (line 293) | def resnet50_ibn_b(pretrained=False):
  function resnet101_ibn_b (line 307) | def resnet101_ibn_b(pretrained=False):

FILE: tllib/normalization/mixstyle/__init__.py
  class MixStyle (line 11) | class MixStyle(nn.Module):
    method __init__ (line 34) | def __init__(self, p=0.5, alpha=0.1, eps=1e-6):
    method forward (line 41) | def forward(self, x):

FILE: tllib/normalization/mixstyle/resnet.py
  function _resnet_with_mix_style (line 12) | def _resnet_with_mix_style(arch, block, layers, pretrained, progress, mi...
  function resnet18 (line 79) | def resnet18(pretrained=False, progress=True, **kwargs):
  function resnet34 (line 90) | def resnet34(pretrained=False, progress=True, **kwargs):
  function resnet50 (line 101) | def resnet50(pretrained=False, progress=True, **kwargs):
  function resnet101 (line 112) | def resnet101(pretrained=False, progress=True, **kwargs):

FILE: tllib/normalization/mixstyle/sampler.py
  class RandomDomainMultiInstanceSampler (line 11) | class RandomDomainMultiInstanceSampler(Sampler):
    method __init__ (line 22) | def __init__(self, dataset, batch_size, n_domains_per_batch, num_insta...
    method __iter__ (line 42) | def __iter__(self):
    method sample_multi_instances (line 64) | def sample_multi_instances(self, sample_idxes):
    method __len__ (line 82) | def __len__(self):

FILE: tllib/normalization/stochnorm.py
  class _StochNorm (line 15) | class _StochNorm(nn.Module):
    method __init__ (line 17) | def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, ...
    method reset_parameters (line 40) | def reset_parameters(self):
    method _check_input_dim (line 48) | def _check_input_dim(self, input):
    method forward (line 51) | def forward(self, input):
  class StochNorm1d (line 87) | class StochNorm1d(_StochNorm):
    method _check_input_dim (line 136) | def _check_input_dim(self, input):
  class StochNorm2d (line 142) | class StochNorm2d(_StochNorm):
    method _check_input_dim (line 192) | def _check_input_dim(self, input):
  class StochNorm3d (line 198) | class StochNorm3d(_StochNorm):
    method _check_input_dim (line 248) | def _check_input_dim(self, input):
  function convert_model (line 254) | def convert_model(module, p):

FILE: tllib/ranking/hscore.py
  function h_score (line 11) | def h_score(features: np.ndarray, labels: np.ndarray):
  function regularized_h_score (line 49) | def regularized_h_score(features: np.ndarray, labels: np.ndarray):

FILE: tllib/ranking/leep.py
  function log_expected_empirical_prediction (line 11) | def log_expected_empirical_prediction(predictions: np.ndarray, labels: n...

FILE: tllib/ranking/logme.py
  function log_maximum_evidence (line 11) | def log_maximum_evidence(features: np.ndarray, targets: np.ndarray, regr...
  function each_evidence (line 65) | def each_evidence(y_, f, fh, v, s, vh, N, D):

FILE: tllib/ranking/nce.py
  function negative_conditional_entropy (line 10) | def negative_conditional_entropy(source_labels: np.ndarray, target_label...

FILE: tllib/ranking/transrate.py
  function coding_rate (line 10) | def coding_rate(features: np.ndarray, eps=1e-4):
  function transrate (line 17) | def transrate(features: np.ndarray, labels: np.ndarray, eps=1e-4):

FILE: tllib/regularization/bi_tuning.py
  class Classifier (line 11) | class Classifier(ClassifierBase):
    method __init__ (line 43) | def __init__(self, backbone: nn.Module, num_classes: int, projection_d...
    method forward (line 52) | def forward(self, x: torch.Tensor):
    method get_parameters (line 66) | def get_parameters(self, base_lr=1.0):
  class BiTuning (line 80) | class BiTuning(nn.Module):
    method __init__ (line 112) | def __init__(self, encoder_q: Classifier, encoder_k: Classifier, num_c...
    method _momentum_update_key_encoder (line 137) | def _momentum_update_key_encoder(self):
    method _dequeue_and_enqueue (line 145) | def _dequeue_and_enqueue(self, h, z, label):
    method forward (line 157) | def forward(self, im_q, im_k, labels):

FILE: tllib/regularization/bss.py
  class BatchSpectralShrinkage (line 11) | class BatchSpectralShrinkage(nn.Module):
    method __init__ (line 40) | def __init__(self, k=1):
    method forward (line 44) | def forward(self, feature):

FILE: tllib/regularization/co_tuning.py
  class CoTuningLoss (line 17) | class CoTuningLoss(nn.Module):
    method __init__ (line 32) | def __init__(self):
    method forward (line 35) | def forward(self, input: torch.Tensor, target: torch.Tensor) -> torch....
  class Relationship (line 41) | class Relationship(object):
    method __init__ (line 51) | def __init__(self, data_loader, classifier, device, cache=None):
    method __getitem__ (line 64) | def __getitem__(self, category):
    method collect_labels (line 67) | def collect_labels(self):
    method get_category_relationship (line 91) | def get_category_relationship(self, source_probabilities, target_labels):
  class Classifier (line 111) | class Classifier(ClassifierBase):
    method __init__ (line 136) | def __init__(self, backbone: nn.Module, num_classes: int,  head_source...
    method get_parameters (line 139) | def get_parameters(self, base_lr=1.0) -> List[Dict]:

FILE: tllib/regularization/delta.py
  class L2Regularization (line 12) | class L2Regularization(nn.Module):
    method __init__ (line 24) | def __init__(self, model: nn.Module):
    method forward (line 28) | def forward(self):
  class SPRegularization (line 35) | class SPRegularization(nn.Module):
    method __init__ (line 55) | def __init__(self, source_model: nn.Module, target_model: nn.Module):
    method forward (line 62) | def forward(self):
  class BehavioralRegularization (line 69) | class BehavioralRegularization(nn.Module):
    method __init__ (line 92) | def __init__(self):
    method forward (line 95) | def forward(self, layer_outputs_source, layer_outputs_target):
  class AttentionBehavioralRegularization (line 102) | class AttentionBehavioralRegularization(nn.Module):
    method __init__ (line 129) | def __init__(self, channel_attention):
    method forward (line 133) | def forward(self, layer_outputs_source, layer_outputs_target):
  function get_attribute (line 147) | def get_attribute(obj, attr, *args):
  class IntermediateLayerGetter (line 153) | class IntermediateLayerGetter:
    method __init__ (line 167) | def __init__(self, model, return_layers, keep_output=True):
    method __call__ (line 172) | def __call__(self, *args, **kwargs):

FILE: tllib/regularization/knowledge_distillation.py
  class KnowledgeDistillationLoss (line 5) | class KnowledgeDistillationLoss(nn.Module):
    method __init__ (line 24) | def __init__(self, T=1., reduction='batchmean'):
    method forward (line 29) | def forward(self, y_student, y_teacher):

FILE: tllib/regularization/lwf.py
  function collect_pretrain_labels (line 11) | def collect_pretrain_labels(data_loader, classifier, device):
  class Classifier (line 23) | class Classifier(nn.Module):
    method __init__ (line 48) | def __init__(self, backbone: nn.Module, num_classes: int,  head_source,
    method features_dim (line 77) | def features_dim(self) -> int:
    method forward (line 81) | def forward(self, x: torch.Tensor):
    method get_parameters (line 92) | def get_parameters(self, base_lr=1.0) -> List[Dict]:

FILE: tllib/reweight/groupdro.py
  class AutomaticUpdateDomainWeightModule (line 9) | class AutomaticUpdateDomainWeightModule(object):
    method __init__ (line 34) | def __init__(self, num_domains: int, eta: float, device):
    method get_domain_weight (line 38) | def get_domain_weight(self, sampled_domain_idxes):
    method update (line 52) | def update(self, sampled_domain_losses: torch.Tensor, sampled_domain_i...

FILE: tllib/reweight/iwan.py
  class ImportanceWeightModule (line 12) | class ImportanceWeightModule(object):
    method __init__ (line 36) | def __init__(self, discriminator: nn.Module, partial_classes_index: Op...
    method get_importance_weight (line 40) | def get_importance_weight(self, feature):
    method get_partial_classes_weight (line 55) | def get_partial_classes_weight(self, weights: torch.Tensor, labels: to...
  class ImageClassifier (line 84) | class ImageClassifier(ClassifierBase):
    method __init__ (line 88) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/reweight/pada.py
  class AutomaticUpdateClassWeightModule (line 13) | class AutomaticUpdateClassWeightModule(object):
    method __init__ (line 41) | def __init__(self, update_steps: int, data_loader: DataLoader,
    method step (line 56) | def step(self):
    method get_class_weight_for_cross_entropy_loss (line 62) | def get_class_weight_for_cross_entropy_loss(self):
    method get_class_weight_for_adversarial_loss (line 70) | def get_class_weight_for_adversarial_loss(self, source_labels: torch.T...
    method get_partial_classes_weight (line 84) | def get_partial_classes_weight(self):
  class ClassWeightModule (line 98) | class ClassWeightModule(nn.Module):
    method __init__ (line 124) | def __init__(self, temperature: Optional[float] = 0.1):
    method forward (line 128) | def forward(self, outputs: torch.Tensor):
  function collect_classification_results (line 137) | def collect_classification_results(data_loader: DataLoader, classifier: ...

FILE: tllib/self_training/cc_loss.py
  class CCConsistency (line 17) | class CCConsistency(nn.Module):
    method __init__ (line 46) | def __init__(self, temperature: float, thr=0.7):
    method forward (line 51) | def forward(self, logits: torch.Tensor, logits_strong: torch.Tensor) -...

FILE: tllib/self_training/dst.py
  class ImageClassifier (line 13) | class ImageClassifier(Classifier):
    method __init__ (line 41) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...
    method forward (line 65) | def forward(self, x: torch.Tensor):
    method get_parameters (line 77) | def get_parameters(self, base_lr=1.0):
    method step (line 91) | def step(self):
  function shift_log (line 95) | def shift_log(x, offset=1e-6):
  class WorstCaseEstimationLoss (line 103) | class WorstCaseEstimationLoss(nn.Module):
    method __init__ (line 134) | def __init__(self, eta_prime):
    method forward (line 138) | def forward(self, y_l, y_l_adv, y_u, y_u_adv):

FILE: tllib/self_training/flexmatch.py
  class DynamicThresholdingModule (line 10) | class DynamicThresholdingModule(object):
    method __init__ (line 41) | def __init__(self, threshold, warmup, mapping_func, num_classes, n_unl...
    method get_threshold (line 51) | def get_threshold(self, pseudo_labels):
    method update (line 71) | def update(self, idxes, selected_mask, pseudo_labels):

FILE: tllib/self_training/mcc.py
  class MinimumClassConfusionLoss (line 17) | class MinimumClassConfusionLoss(nn.Module):
    method __init__ (line 61) | def __init__(self, temperature: float):
    method forward (line 65) | def forward(self, logits: torch.Tensor) -> torch.Tensor:
  class ImageClassifier (line 77) | class ImageClassifier(ClassifierBase):
    method __init__ (line 78) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/self_training/mean_teacher.py
  function set_requires_grad (line 6) | def set_requires_grad(net, requires_grad=False):
  class EMATeacher (line 14) | class EMATeacher(object):
    method __init__ (line 47) | def __init__(self, model, alpha):
    method set_alpha (line 53) | def set_alpha(self, alpha: float):
    method update (line 57) | def update(self):
    method __call__ (line 61) | def __call__(self, x: torch.Tensor):
    method train (line 64) | def train(self, mode: Optional[bool] = True):
    method eval (line 67) | def eval(self):
    method state_dict (line 70) | def state_dict(self):
    method load_state_dict (line 73) | def load_state_dict(self, state_dict):
    method module (line 77) | def module(self):
  function update_bn (line 81) | def update_bn(model, ema_model):

FILE: tllib/self_training/pi_model.py
  function sigmoid_warm_up (line 11) | def sigmoid_warm_up(current_epoch, warm_up_epochs: int):
  class ConsistencyLoss (line 24) | class ConsistencyLoss(nn.Module):
    method __init__ (line 49) | def __init__(self, distance_measure: Callable, reduction: Optional[str...
    method forward (line 54) | def forward(self, p1: torch.Tensor, p2: torch.Tensor, mask=1.):
  class L2ConsistencyLoss (line 65) | class L2ConsistencyLoss(ConsistencyLoss):
    method __init__ (line 75) | def __init__(self, reduction: Optional[str] = 'mean'):

FILE: tllib/self_training/pseudo_label.py
  class ConfidenceBasedSelfTrainingLoss (line 10) | class ConfidenceBasedSelfTrainingLoss(nn.Module):
    method __init__ (line 36) | def __init__(self, threshold: float):
    method forward (line 40) | def forward(self, y, y_target):

FILE: tllib/self_training/self_ensemble.py
  class ClassBalanceLoss (line 13) | class ClassBalanceLoss(nn.Module):
    method __init__ (line 38) | def __init__(self, num_classes):
    method forward (line 42) | def forward(self, p: torch.Tensor):
  class ImageClassifier (line 46) | class ImageClassifier(ClassifierBase):
    method __init__ (line 47) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck_d...

FILE: tllib/self_training/self_tuning.py
  class Classifier (line 12) | class Classifier(ClassifierBase):
    method __init__ (line 38) | def __init__(self, backbone: nn.Module, num_classes: int, projection_d...
    method forward (line 54) | def forward(self, x: torch.Tensor):
    method get_parameters (line 67) | def get_parameters(self, base_lr=1.0):
  class SelfTuning (line 78) | class SelfTuning(nn.Module):
    method __init__ (line 108) | def __init__(self, encoder_q, encoder_k, num_classes, K=32, m=0.999, T...
    method _momentum_update_key_encoder (line 130) | def _momentum_update_key_encoder(self):
    method _dequeue_and_enqueue (line 138) | def _dequeue_and_enqueue(self, h, label):
    method forward (line 150) | def forward(self, im_q, im_k, labels):

FILE: tllib/self_training/uda.py
  class StrongWeakConsistencyLoss (line 10) | class StrongWeakConsistencyLoss(nn.Module):
    method __init__ (line 29) | def __init__(self, threshold: float, temperature: float):
    method forward (line 34) | def forward(self, y_strong, y):

FILE: tllib/translation/cycada.py
  class SemanticConsistency (line 9) | class SemanticConsistency(nn.Module):
    method __init__ (line 49) | def __init__(self, ignore_index=(), reduction='mean'):
    method forward (line 54) | def forward(self, input: Tensor, target: Tensor) -> Tensor:

FILE: tllib/translation/cyclegan/discriminator.py
  class NLayerDiscriminator (line 12) | class NLayerDiscriminator(nn.Module):
    method __init__ (line 22) | def __init__(self, input_nc, ndf=64, n_layers=3, norm_layer=nn.BatchNo...
    method forward (line 54) | def forward(self, input):
  class PixelDiscriminator (line 58) | class PixelDiscriminator(nn.Module):
    method __init__ (line 67) | def __init__(self, input_nc, ndf=64, norm_layer=nn.BatchNorm2d):
    method forward (line 84) | def forward(self, input):
  function patch (line 88) | def patch(ndf, input_nc=3, norm='batch', n_layers=3, init_type='normal',...
  function pixel (line 111) | def pixel(ndf, input_nc=3, norm='batch', init_type='normal', init_gain=0...

FILE: tllib/translation/cyclegan/generator.py
  class ResnetBlock (line 12) | class ResnetBlock(nn.Module):
    method __init__ (line 15) | def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
    method build_conv_block (line 26) | def build_conv_block(self, dim, padding_type, norm_layer, use_dropout,...
    method forward (line 66) | def forward(self, x):
  class ResnetGenerator (line 72) | class ResnetGenerator(nn.Module):
    method __init__ (line 79) | def __init__(self, input_nc, output_nc, ngf=64, norm_layer=nn.BatchNor...
    method forward (line 129) | def forward(self, input):
  class UnetGenerator (line 134) | class UnetGenerator(nn.Module):
    method __init__ (line 137) | def __init__(self, input_nc, output_nc, num_downs, ngf=64, norm_layer=...
    method forward (line 161) | def forward(self, input):
  class UnetSkipConnectionBlock (line 166) | class UnetSkipConnectionBlock(nn.Module):
    method __init__ (line 172) | def __init__(self, outer_nc, inner_nc, input_nc=None,
    method forward (line 229) | def forward(self, x):
  function resnet_9 (line 236) | def resnet_9(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
  function resnet_6 (line 257) | def resnet_6(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
  function unet_256 (line 278) | def unet_256(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
  function unet_128 (line 301) | def unet_128(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,
  function unet_32 (line 324) | def unet_32(ngf, input_nc=3, output_nc=3, norm='batch', use_dropout=False,

FILE: tllib/translation/cyclegan/loss.py
  class LeastSquaresGenerativeAdversarialLoss (line 10) | class LeastSquaresGenerativeAdversarialLoss(nn.Module):
    method __init__ (line 28) | def __init__(self, reduction='mean'):
    method forward (line 32) | def forward(self, prediction, real=True):
  class VanillaGenerativeAdversarialLoss (line 40) | class VanillaGenerativeAdversarialLoss(nn.Module):
    method __init__ (line 58) | def __init__(self, reduction='mean'):
    method forward (line 62) | def forward(self, prediction, real=True):
  class WassersteinGenerativeAdversarialLoss (line 70) | class WassersteinGenerativeAdversarialLoss(nn.Module):
    method __init__ (line 88) | def __init__(self, reduction='mean'):
    method forward (line 92) | def forward(self, prediction, real=True):

FILE: tllib/translation/cyclegan/transform.py
  class Translation (line 12) | class Translation(nn.Module):
    method __init__ (line 28) | def __init__(self, generator, device=torch.device("cpu"), mean=(0.5, 0...
    method forward (line 41) | def forward(self, image):

FILE: tllib/translation/cyclegan/util.py
  class Identity (line 12) | class Identity(nn.Module):
    method forward (line 13) | def forward(self, x):
  function get_norm_layer (line 17) | def get_norm_layer(norm_type='instance'):
  function init_weights (line 37) | def init_weights(net, init_type='normal', init_gain=0.02):
  class ImagePool (line 72) | class ImagePool:
    method __init__ (line 83) | def __init__(self, pool_size):
    method query (line 89) | def query(self, images):
  function set_requires_grad (line 123) | def set_requires_grad(net, requires_grad=False):

FILE: tllib/translation/fourier_transform.py
  function low_freq_mutate (line 14) | def low_freq_mutate(amp_src: np.ndarray, amp_trg: np.ndarray, beta: Opti...
  class FourierTransform (line 47) | class FourierTransform(nn.Module):
    method __init__ (line 116) | def __init__(self, image_list: Sequence[str], amplitude_dir: str,
    method build_amplitude (line 127) | def build_amplitude(image_list, amplitude_dir):
    method forward (line 137) | def forward(self, image):

FILE: tllib/translation/spgan/loss.py
  class ContrastiveLoss (line 10) | class ContrastiveLoss(torch.nn.Module):
    method __init__ (line 33) | def __init__(self, margin=2.0):
    method forward (line 37) | def forward(self, output1, output2, label):

FILE: tllib/translation/spgan/siamese.py
  class ConvBlock (line 10) | class ConvBlock(nn.Module):
    method __init__ (line 12) | def __init__(self, in_dim, out_dim):
    method forward (line 20) | def forward(self, x):
  class SiameseNetwork (line 24) | class SiameseNetwork(nn.Module):
    method __init__ (line 30) | def __init__(self, nsf=64):
    method forward (line 45) | def forward(self, x):

FILE: tllib/utils/analysis/__init__.py
  function collect_feature (line 7) | def collect_feature(data_loader: DataLoader, feature_extractor: nn.Module,

FILE: tllib/utils/analysis/a_distance.py
  class ANet (line 15) | class ANet(nn.Module):
    method __init__ (line 16) | def __init__(self, in_feature):
    method forward (line 21) | def forward(self, x):
  function calculate (line 27) | def calculate(source_feature: torch.Tensor, target_feature: torch.Tensor,

FILE: tllib/utils/analysis/tsne.py
  function visualize (line 15) | def visualize(source_feature: torch.Tensor, target_feature: torch.Tensor,

FILE: tllib/utils/data.py
  function send_to_device (line 18) | def send_to_device(tensor, device):
  class ForeverDataIterator (line 40) | class ForeverDataIterator:
    method __init__ (line 43) | def __init__(self, data_loader: DataLoader, device=None):
    method __next__ (line 48) | def __next__(self):
    method __len__ (line 60) | def __len__(self):
  class RandomMultipleGallerySampler (line 64) | class RandomMultipleGallerySampler(Sampler):
    method __init__ (line 76) | def __init__(self, dataset, num_instances=4):
    method __len__ (line 97) | def __len__(self):
    method __iter__ (line 100) | def __iter__(self):
  class CombineDataset (line 141) | class CombineDataset(Dataset[T_co]):
    method __init__ (line 151) | def __init__(self, datasets: Iterable[Dataset]) -> None:
    method __len__ (line 157) | def __len__(self):
    method __getitem__ (line 160) | def __getitem__(self, idx):
  function concatenate (line 164) | def concatenate(tensors):

FILE: tllib/utils/logger.py
  class TextLogger (line 9) | class TextLogger(object):
    method __init__ (line 16) | def __init__(self, filename, stream=sys.stdout):
    method write (line 20) | def write(self, message):
    method flush (line 25) | def flush(self):
    method close (line 29) | def close(self):
  class CompleteLogger (line 34) | class CompleteLogger:
    method __init__ (line 47) | def __init__(self, root, phase='train'):
    method set_epoch (line 69) | def set_epoch(self, epoch):
    method _get_phase_or_epoch (line 74) | def _get_phase_or_epoch(self):
    method get_image_path (line 80) | def get_image_path(self, filename: str):
    method get_checkpoint_path (line 86) | def get_checkpoint_path(self, name=None):
    method close (line 101) | def close(self):

FILE: tllib/utils/meter.py
  class AverageMeter (line 8) | class AverageMeter(object):
    method __init__ (line 18) | def __init__(self, name: str, fmt: Optional[str] = ':f'):
    method reset (line 23) | def reset(self):
    method update (line 29) | def update(self, val, n=1):
    method __str__ (line 36) | def __str__(self):
  class AverageMeterDict (line 41) | class AverageMeterDict(object):
    method __init__ (line 42) | def __init__(self, names: List, fmt: Optional[str] = ':f'):
    method reset (line 47) | def reset(self):
    method update (line 51) | def update(self, accuracies, n=1):
    method average (line 55) | def average(self):
    method __getitem__ (line 60) | def __getitem__(self, item):
  class Meter (line 64) | class Meter(object):
    method __init__ (line 66) | def __init__(self, name: str, fmt: Optional[str] = ':f'):
    method reset (line 71) | def reset(self):
    method update (line 74) | def update(self, val):
    method __str__ (line 77) | def __str__(self):
  class ProgressMeter (line 82) | class ProgressMeter(object):
    method __init__ (line 83) | def __init__(self, num_batches, meters, prefix=""):
    method display (line 88) | def display(self, batch):
    method _get_batch_fmtstr (line 93) | def _get_batch_fmtstr(self, num_batches):

FILE: tllib/utils/metric/__init__.py
  function binary_accuracy (line 6) | def binary_accuracy(output: torch.Tensor, target: torch.Tensor) -> float:
  function accuracy (line 16) | def accuracy(output, target, topk=(1,)):
  class ConfusionMatrix (line 43) | class ConfusionMatrix(object):
    method __init__ (line 44) | def __init__(self, num_classes):
    method update (line 48) | def update(self, target, output):
    method reset (line 68) | def reset(self):
    method compute (line 71) | def compute(self):
    method __str__ (line 87) | def __str__(self):
    method format (line 99) | def format(self, classes: list):

FILE: tllib/utils/metric/keypoint_detection.py
  function get_max_preds (line 9) | def get_max_preds(batch_heatmaps):
  function calc_dists (line 40) | def calc_dists(preds, target, normalize):
  function dist_acc (line 55) | def dist_acc(dists, thr=0.5):
  function accuracy (line 65) | def accuracy(output, target, hm_type='gaussian', thr=0.5):

FILE: tllib/utils/metric/reid.py
  function unique_sample (line 18) | def unique_sample(ids_dict, num):
  function cmc (line 27) | def cmc(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams, topk...
  function mean_ap (line 79) | def mean_ap(dist_mat, query_ids, gallery_ids, query_cams, gallery_cams):
  function re_ranking (line 105) | def re_ranking(q_g_dist, q_q_dist, g_g_dist, k1=20, k2=6, lambda_value=0...
  function extract_reid_feature (line 178) | def extract_reid_feature(data_loader, model, device, normalize, print_fr...
  function pairwise_distance (line 214) | def pairwise_distance(feature_dict, query, gallery):
  function evaluate_all (line 233) | def evaluate_all(dist_mat, query, gallery, cmc_topk=(1, 5, 10), cmc_flag...
  function validate (line 259) | def validate(val_loader, model, query, gallery, device, criterion='cosin...
  function visualize_ranked_results (line 286) | def visualize_ranked_results(data_loader, model, query, gallery, device,...

FILE: tllib/utils/scheduler.py
  class WarmupMultiStepLR (line 10) | class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
    method __init__ (line 27) | def __init__(
    method get_lr (line 55) | def get_lr(self):

FILE: tllib/vision/datasets/_util.py
  function download (line 10) | def download(root: str, file_name: str, archive_name: str, url_link: str):
  function check_exits (line 37) | def check_exits(root: str, file_name: str):
  function read_list_from_file (line 44) | def read_list_from_file(file_name: str) -> List[str]:

FILE: tllib/vision/datasets/aircrafts.py
  class Aircraft (line 11) | class Aircraft(ImageList):
    method __init__ (line 66) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/caltech101.py
  class Caltech101 (line 10) | class Caltech101(ImageList):
    method __init__ (line 31) | def __init__(self, root, split='train', download=True, **kwargs):

FILE: tllib/vision/datasets/cifar.py
  class CIFAR10 (line 8) | class CIFAR10(CIFAR10Base):
    method __init__ (line 13) | def __init__(self, root, split='train', transform=None, download=True):
  class CIFAR100 (line 18) | class CIFAR100(CIFAR100Base):
    method __init__ (line 23) | def __init__(self, root, split='train', transform=None, download=True):

FILE: tllib/vision/datasets/coco70.py
  class COCO70 (line 11) | class COCO70(ImageList):
    method __init__ (line 61) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/cub200.py
  class CUB200 (line 11) | class CUB200(ImageList):
    method __init__ (line 103) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/digits.py
  class MNIST (line 11) | class MNIST(ImageList):
    method __init__ (line 38) | def __init__(self, root, mode="L", split='train', download: Optional[b...
    method __getitem__ (line 51) | def __getitem__(self, index: int) -> Tuple[Any, int]:
    method get_classes (line 67) | def get_classes(self):
  class USPS (line 71) | class USPS(ImageList):
    method __init__ (line 101) | def __init__(self, root, mode="L", split='train', download: Optional[b...
    method __getitem__ (line 114) | def __getitem__(self, index: int) -> Tuple[Any, int]:
  class SVHN (line 130) | class SVHN(ImageList):
    method __init__ (line 162) | def __init__(self, root, mode="L", download: Optional[bool] = True, **...
    method __getitem__ (line 174) | def __getitem__(self, index: int) -> Tuple[Any, int]:
  class MNISTRGB (line 190) | class MNISTRGB(MNIST):
    method __init__ (line 191) | def __init__(self, root, **kwargs):
  class USPSRGB (line 195) | class USPSRGB(USPS):
    method __init__ (line 196) | def __init__(self, root, **kwargs):
  class SVHNRGB (line 200) | class SVHNRGB(SVHN):
    method __init__ (line 201) | def __init__(self, root, **kwargs):

FILE: tllib/vision/datasets/domainnet.py
  class DomainNet (line 11) | class DomainNet(ImageList):
    method __init__ (line 95) | def __init__(self, root: str, task: str, split: Optional[str] = 'train...
    method domains (line 109) | def domains(cls):

FILE: tllib/vision/datasets/dtd.py
  class DTD (line 10) | class DTD(ImageList):
    method __init__ (line 41) | def __init__(self, root, split, download=False, **kwargs):

FILE: tllib/vision/datasets/eurosat.py
  class EuroSAT (line 10) | class EuroSAT(ImageList):
    method __init__ (line 30) | def __init__(self, root, split='train', download=False, **kwargs):

FILE: tllib/vision/datasets/food101.py
  class Food101 (line 10) | class Food101(ImageFolder):
    method __init__ (line 32) | def __init__(self, root, split='train', transform=None, download=True):

FILE: tllib/vision/datasets/imagelist.py
  class ImageList (line 15) | class ImageList(datasets.VisionDataset):
    method __init__ (line 37) | def __init__(self, root: str, classes: List[str], data_list_file: str,
    method __getitem__ (line 48) | def __getitem__(self, index: int) -> Tuple[Any, int]:
    method __len__ (line 62) | def __len__(self) -> int:
    method parse_data_file (line 65) | def parse_data_file(self, file_name: str) -> List[Tuple[str, int]]:
    method num_classes (line 85) | def num_classes(self) -> int:
    method domains (line 90) | def domains(cls):
  class MultipleDomainsDataset (line 95) | class MultipleDomainsDataset(Dataset[T_co]):
    method cumsum (line 107) | def cumsum(sequence):
    method __init__ (line 115) | def __init__(self, domains: Iterable[Dataset], domain_names: Iterable[...
    method __len__ (line 126) | def __len__(self):
    method __getitem__ (line 129) | def __getitem__(self, idx):
    method cummulative_sizes (line 142) | def cummulative_sizes(self):

FILE: tllib/vision/datasets/imagenet_r.py
  class ImageNetR (line 11) | class ImageNetR(ImageList):
    method __init__ (line 68) | def __init__(self, root: str, task: str, split: Optional[str] = 'all',...
    method domains (line 84) | def domains(cls):

FILE: tllib/vision/datasets/imagenet_sketch.py
  class ImageNetSketch (line 12) | class ImageNetSketch(ImageList):
    method __init__ (line 52) | def __init__(self, root: str, task: str, split: Optional[str] = 'all',...
    method domains (line 68) | def domains(cls):

FILE: tllib/vision/datasets/keypoint_detection/freihand.py
  function _assert_exist (line 18) | def _assert_exist(p):
  function json_load (line 23) | def json_load(p):
  function load_db_annotation (line 30) | def load_db_annotation(base_path, set_name=None):
  function projectPoints (line 56) | def projectPoints(xyz, K):
  function db_size (line 65) | def db_size(set_name):
  class sample_version (line 75) | class sample_version:
    method valid_options (line 84) | def valid_options(cls):
    method check_valid (line 89) | def check_valid(cls, version):
    method map_id (line 94) | def map_id(cls, id, version):
  class FreiHand (line 99) | class FreiHand(Hand21KeypointDataset):
    method __init__ (line 123) | def __init__(self, root, split='train', task='all', download=True, **k...
    method __getitem__ (line 150) | def __getitem__(self, index):
    method get_samples (line 201) | def get_samples(self, root, version='gs'):

FILE: tllib/vision/datasets/keypoint_detection/hand_3d_studio.py
  class Hand3DStudio (line 19) | class Hand3DStudio(Hand21KeypointDataset):
    method __init__ (line 49) | def __init__(self, root, split='train', task='noobject', download=True...
    method __getitem__ (line 82) | def __getitem__(self, index):
  class Hand3DStudioAll (line 119) | class Hand3DStudioAll(Hand3DStudio):
    method __init__ (line 124) | def __init__(self,  root, task='all', **kwargs):

FILE: tllib/vision/datasets/keypoint_detection/human36m.py
  class Human36M (line 17) | class Human36M(Body16KeypointDataset):
    method __init__ (line 50) | def __init__(self, root, split='train', task='all', download=True, **k...
    method __getitem__ (line 75) | def __getitem__(self, index):
    method preprocess (line 111) | def preprocess(self, part, root):

FILE: tllib/vision/datasets/keypoint_detection/keypoint_dataset.py
  class KeypointDataset (line 12) | class KeypointDataset(Dataset, ABC):
    method __init__ (line 27) | def __init__(self, root, num_keypoints, samples, transforms=None, imag...
    method __len__ (line 39) | def __len__(self):
    method visualize (line 42) | def visualize(self, image, keypoints, filename):
    method group_accuracy (line 62) | def group_accuracy(self, accuracies):
  class Body16KeypointDataset (line 78) | class Body16KeypointDataset(KeypointDataset, ABC):
    method __init__ (line 97) | def __init__(self, root, samples, **kwargs):
  class Hand21KeypointDataset (line 119) | class Hand21KeypointDataset(KeypointDataset, ABC):
    method __init__ (line 135) | def __init__(self, root, samples, **kwargs):

FILE: tllib/vision/datasets/keypoint_detection/lsp.py
  class LSP (line 19) | class LSP(Body16KeypointDataset):
    method __init__ (line 42) | def __init__(self, root, split='train', task='all', download=True, ima...
    method __getitem__ (line 69) | def __getitem__(self, index):

FILE: tllib/vision/datasets/keypoint_detection/rendered_hand_pose.py
  class RenderedHandPose (line 15) | class RenderedHandPose(Hand21KeypointDataset):
    method __init__ (line 36) | def __init__(self, root, split='train', task='all', download=True, **k...
    method __getitem__ (line 54) | def __getitem__(self, index):
    method get_samples (line 106) | def get_samples(self, root, task, min_size=64):

FILE: tllib/vision/datasets/keypoint_detection/surreal.py
  class SURREAL (line 17) | class SURREAL(Body16KeypointDataset):
    method __init__ (line 43) | def __init__(self, root, split='train', task='all', download=True, **k...
    method __getitem__ (line 82) | def __getitem__(self, index):
    method __len__ (line 120) | def __len__(self):

FILE: tllib/vision/datasets/keypoint_detection/util.py
  function generate_target (line 9) | def generate_target(joints, joints_vis, heatmap_size, sigma, image_size):
  function keypoint2d_to_3d (line 71) | def keypoint2d_to_3d(keypoint2d: np.ndarray, intrinsic_matrix: np.ndarra...
  function keypoint3d_to_2d (line 78) | def keypoint3d_to_2d(keypoint3d: np.ndarray, intrinsic_matrix: np.ndarray):
  function scale_box (line 85) | def scale_box(box, image_width, image_height, scale):
  function get_bounding_box (line 114) | def get_bounding_box(keypoint2d: np.array):
  function visualize_heatmap (line 123) | def visualize_heatmap(image, heatmaps, filename):
  function area (line 135) | def area(left, upper, right, lower):
  function intersection (line 139) | def intersection(box_a, box_b):

FILE: tllib/vision/datasets/object_detection/__init__.py
  function parse_root_and_file_name (line 18) | def parse_root_and_file_name(path):
  class VOCBase (line 27) | class VOCBase:
    method __init__ (line 34) | def __init__(self, root, split="trainval", year=2007, ext='.jpg', down...
  class VOC2007 (line 45) | class VOC2007(VOCBase):
    method __init__ (line 49) | def __init__(self, root):
  class VOC2012 (line 53) | class VOC2012(VOCBase):
    method __init__ (line 57) | def __init__(self, root):
  class VOC2007Test (line 61) | class VOC2007Test(VOCBase):
    method __init__ (line 65) | def __init__(self, root):
  class Clipart (line 69) | class Clipart(VOCBase):
  class VOCPartialBase (line 74) | class VOCPartialBase:
    method __init__ (line 79) | def __init__(self, root, split="trainval", year=2007, ext='.jpg', down...
  class VOC2007Partial (line 90) | class VOC2007Partial(VOCPartialBase):
    method __init__ (line 94) | def __init__(self, root):
  class VOC2012Partial (line 98) | class VOC2012Partial(VOCPartialBase):
    method __init__ (line 102) | def __init__(self, root):
  class VOC2007PartialTest (line 106) | class VOC2007PartialTest(VOCPartialBase):
    method __init__ (line 110) | def __init__(self, root):
  class WaterColor (line 114) | class WaterColor(VOCPartialBase):
    method __init__ (line 118) | def __init__(self, root):
  class WaterColorTest (line 122) | class WaterColorTest(VOCPartialBase):
    method __init__ (line 126) | def __init__(self, root):
  class Comic (line 130) | class Comic(VOCPartialBase):
    method __init__ (line 134) | def __init__(self, root):
  class ComicTest (line 138) | class ComicTest(VOCPartialBase):
    method __init__ (line 142) | def __init__(self, root):
  class CityscapesBase (line 146) | class CityscapesBase:
    method __init__ (line 151) | def __init__(self, root, split="trainval", year=2007, ext='.png'):
  class Cityscapes (line 160) | class Cityscapes(CityscapesBase):
    method __init__ (line 161) | def __init__(self, root):
  class CityscapesTest (line 165) | class CityscapesTest(CityscapesBase):
    method __init__ (line 166) | def __init__(self, root):
  class FoggyCityscapes (line 170) | class FoggyCityscapes(Cityscapes):
  class FoggyCityscapesTest (line 174) | class FoggyCityscapesTest(CityscapesTest):
  class CityscapesCarBase (line 178) | class CityscapesCarBase:
    method __init__ (line 183) | def __init__(self, root, split="trainval", year=2007, ext='.png', bbox...
  class CityscapesCar (line 192) | class CityscapesCar(CityscapesCarBase):
  class CityscapesCarTest (line 196) | class CityscapesCarTest(CityscapesCarBase):
    method __init__ (line 197) | def __init__(self, root):
  class Sim10kCar (line 201) | class Sim10kCar(CityscapesCarBase):
    method __init__ (line 202) | def __init__(self, root):
  class KITTICar (line 206) | class KITTICar(CityscapesCarBase):
    method __init__ (line 207) | def __init__(self, root):
  class GTA5 (line 211) | class GTA5(CityscapesBase):
    method __init__ (line 212) | def __init__(self, root):
  function load_voc_instances (line 216) | def load_voc_instances(dirname: str, split: str, class_names, ext='.jpg'...
  function register_pascal_voc (line 275) | def register_pascal_voc(name, dirname, split, year, class_names, **kwargs):

FILE: tllib/vision/datasets/office31.py
  class Office31 (line 11) | class Office31(ImageList):
    method __init__ (line 54) | def __init__(self, root: str, task: str, download: Optional[bool] = Tr...
    method domains (line 66) | def domains(cls):

FILE: tllib/vision/datasets/officecaltech.py
  class OfficeCaltech (line 12) | class OfficeCaltech(DatasetFolder):
    method __init__ (line 50) | def __init__(self, root: str, task: str, download: Optional[bool] = Fa...
    method num_classes (line 70) | def num_classes(self):
    method domains (line 75) | def domains(cls):

FILE: tllib/vision/datasets/officehome.py
  class OfficeHome (line 11) | class OfficeHome(ImageList):
    method __init__ (line 59) | def __init__(self, root: str, task: str, download: Optional[bool] = Fa...
    method domains (line 71) | def domains(cls):

FILE: tllib/vision/datasets/openset/__init__.py
  function open_set (line 17) | def open_set(dataset_class: ClassVar, public_classes: Sequence[str],
  function default_open_set (line 67) | def default_open_set(dataset_class: ClassVar, source: bool) -> ClassVar:

FILE: tllib/vision/datasets/oxfordflowers.py
  class OxfordFlowers102 (line 10) | class OxfordFlowers102(ImageList):
    method __init__ (line 54) | def __init__(self, root, split='train', download=False, **kwargs):

FILE: tllib/vision/datasets/oxfordpets.py
  class OxfordIIITPets (line 11) | class OxfordIIITPets(ImageList):
    method __init__ (line 58) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/pacs.py
  class PACS (line 7) | class PACS(ImageList):
    method __init__ (line 50) | def __init__(self, root: str, task: str, split='all', download: Option...
    method domains (line 66) | def domains(cls):

FILE: tllib/vision/datasets/partial/__init__.py
  function partial (line 19) | def partial(dataset_class: ClassVar, partial_classes: Sequence[str]) -> ...
  function default_partial (line 59) | def default_partial(dataset_class: ClassVar) -> ClassVar:

FILE: tllib/vision/datasets/partial/caltech_imagenet.py
  class CaltechImageNet (line 48) | class CaltechImageNet(ImageList):
    method __init__ (line 87) | def __init__(self, root: str, task: str, download: Optional[bool] = Tr...
  class CaltechImageNetUniversal (line 104) | class CaltechImageNetUniversal(ImageList):
    method __init__ (line 143) | def __init__(self, root: str, task: str, download: Optional[bool] = Tr...

FILE: tllib/vision/datasets/partial/imagenet_caltech.py
  class ImageNetCaltech (line 391) | class ImageNetCaltech(ImageList):
    method __init__ (line 430) | def __init__(self, root: str, task: str, download: Optional[bool] = Tr...
  class ImageNetCaltechUniversal (line 447) | class ImageNetCaltechUniversal(ImageList):
    method __init__ (line 487) | def __init__(self, root: str, task: str, download: Optional[bool] = Tr...

FILE: tllib/vision/datasets/patchcamelyon.py
  class PatchCamelyon (line 10) | class PatchCamelyon(ImageList):
    method __init__ (line 28) | def __init__(self, root, split, download=False,  **kwargs):

FILE: tllib/vision/datasets/regression/dsprites.py
  class DSprites (line 11) | class DSprites(ImageRegression):
    method __init__ (line 53) | def __init__(self, root: str, task: str, split: Optional[str] = 'train',

FILE: tllib/vision/datasets/regression/image_regression.py
  class ImageRegression (line 12) | class ImageRegression(datasets.VisionDataset):
    method __init__ (line 34) | def __init__(self, root: str, factors: Sequence[str], data_list_file: ...
    method __getitem__ (line 42) | def __getitem__(self, index: int) -> Tuple[Any, Tuple[float]]:
    method __len__ (line 58) | def __len__(self) -> int:
    method parse_data_file (line 61) | def parse_data_file(self, file_name: str) -> List[Tuple[str, Any]]:
    method num_factors (line 82) | def num_factors(self) -> int:

FILE: tllib/vision/datasets/regression/mpi3d.py
  class MPI3D (line 11) | class MPI3D(ImageRegression):
    method __init__ (line 53) | def __init__(self, root: str, task: str, split: Optional[str] = 'train',

FILE: tllib/vision/datasets/reid/basedataset.py
  class BaseDataset (line 10) | class BaseDataset(object):
    method get_imagedata_info (line 15) | def get_imagedata_info(self, data):
    method get_videodata_info (line 27) | def get_videodata_info(self, data, return_tracklet_stats=False):
    method print_dataset_statistics (line 42) | def print_dataset_statistics(self, train, query, galler):
    method check_before_run (line 45) | def check_before_run(self, required_files):
    method images_dir (line 58) | def images_dir(self):
  class BaseImageDataset (line 62) | class BaseImageDataset(BaseDataset):
    method print_dataset_statistics (line 67) | def print_dataset_statistics(self, train, query, gallery):
  class BaseVideoDataset (line 82) | class BaseVideoDataset(BaseDataset):
    method print_dataset_statistics (line 87) | def print_dataset_statistics(self, train, query, gallery):

FILE: tllib/vision/datasets/reid/convert.py
  function convert_to_pytorch_dataset (line 10) | def convert_to_pytorch_dataset(dataset, root=None, transform=None, retur...

FILE: tllib/vision/datasets/reid/dukemtmc.py
  class DukeMTMC (line 15) | class DukeMTMC(BaseImageDataset):
    method __init__ (line 32) | def __init__(self, root, verbose=True):
    method process_dir (line 60) | def process_dir(self, dir_path, relabel=False):
    method translate (line 81) | def translate(self, transform: Callable, target_root: str):
    method translate_dir (line 102) | def translate_dir(self, transform, origin_dir: str, target_dir: str):

FILE: tllib/vision/datasets/reid/market1501.py
  class Market1501 (line 15) | class Market1501(BaseImageDataset):
    method __init__ (line 32) | def __init__(self, root, verbose=True):
    method process_dir (line 60) | def process_dir(self, dir_path, relabel=False):
    method translate (line 86) | def translate(self, transform: Callable, target_root: str):
    method translate_dir (line 107) | def translate_dir(self, transform, origin_dir: str, target_dir: str):

FILE: tllib/vision/datasets/reid/msmt17.py
  class MSMT17 (line 13) | class MSMT17(BaseImageDataset):
    method __init__ (line 30) | def __init__(self, root, verbose=True):
    method process_dir (line 53) | def process_dir(self, dir_path):
    method translate (line 71) | def translate(self, transform: Callable, target_root: str):
    method translate_dir (line 92) | def translate_dir(self, transform, origin_dir: str, target_dir: str):

FILE: tllib/vision/datasets/reid/personx.py
  class PersonX (line 16) | class PersonX(BaseImageDataset):
    method __init__ (line 33) | def __init__(self, root, verbose=True):
    method process_dir (line 61) | def process_dir(self, dir_path, relabel=False):
    method translate (line 84) | def translate(self, transform: Callable, target_root: str):
    method translate_dir (line 105) | def translate_dir(self, transform, origin_dir: str, target_dir: str):

FILE: tllib/vision/datasets/reid/unreal.py
  class UnrealPerson (line 12) | class UnrealPerson(BaseImageDataset):
    method __init__ (line 42) | def __init__(self, root, verbose=True):
    method process_dir (line 66) | def process_dir(self, list_file):
    method translate (line 86) | def translate(self, transform: Callable, target_root: str):

FILE: tllib/vision/datasets/resisc45.py
  class Resisc45 (line 10) | class Resisc45(ImageFolder):
    method __init__ (line 27) | def __init__(self, root, split='train', download=False, **kwargs):
    method num_classes (line 37) | def num_classes(self) -> int:

FILE: tllib/vision/datasets/retinopathy.py
  class Retinopathy (line 9) | class Retinopathy(ImageList):
    method __init__ (line 27) | def __init__(self, root, split, download=False, **kwargs):

FILE: tllib/vision/datasets/segmentation/cityscapes.py
  class Cityscapes (line 10) | class Cityscapes(SegmentationList):
    method __init__ (line 55) | def __init__(self, root, split='train', data_folder='leftImg8bit', lab...
    method parse_label_file (line 67) | def parse_label_file(self, label_list_file):
  class FoggyCityscapes (line 73) | class FoggyCityscapes(Cityscapes):
    method __init__ (line 99) | def __init__(self, root, split='train', data_folder='leftImg8bit_foggy...
    method parse_data_file (line 104) | def parse_data_file(self, file_name):

FILE: tllib/vision/datasets/segmentation/gta5.py
  class GTA5 (line 11) | class GTA5(SegmentationList):
    method __init__ (line 33) | def __init__(self, root, split='train', data_folder='images', label_fo...

FILE: tllib/vision/datasets/segmentation/segmentation_list.py
  class SegmentationList (line 14) | class SegmentationList(data.Dataset):
    method __init__ (line 42) | def __init__(self, root: str, classes: Sequence[str], data_list_file: ...
    method parse_data_file (line 59) | def parse_data_file(self, file_name):
    method parse_label_file (line 72) | def parse_label_file(self, file_name):
    method __len__ (line 85) | def __len__(self):
    method __getitem__ (line 88) | def __getitem__(self, index):
    method num_classes (line 107) | def num_classes(self) -> int:
    method decode_target (line 111) | def decode_target(self, target):
    method collect_image_paths (line 125) | def collect_image_paths(self):
    method _save_pil_image (line 130) | def _save_pil_image(image, path):
    method translate (line 134) | def translate(self, transform: Callable, target_root: str, color=False):
    method evaluate_classes (line 161) | def evaluate_classes(self):
    method ignore_classes (line 166) | def ignore_classes(self):

FILE: tllib/vision/datasets/segmentation/synthia.py
  class Synthia (line 11) | class Synthia(SegmentationList):
    method __init__ (line 38) | def __init__(self, root, split='train', data_folder='RGB', label_folde...
    method evaluate_classes (line 48) | def evaluate_classes(self):

FILE: tllib/vision/datasets/stanford_cars.py
  class StanfordCars (line 11) | class StanfordCars(ImageList):
    method __init__ (line 67) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/stanford_dogs.py
  class StanfordDogs (line 11) | class StanfordDogs(ImageList):
    method __init__ (line 95) | def __init__(self, root: str, split: str, sample_rate: Optional[int] =...

FILE: tllib/vision/datasets/sun397.py
  class SUN397 (line 10) | class SUN397(ImageList):
    method __init__ (line 30) | def __init__(self, root, split='train', download=True, **kwargs):

FILE: tllib/vision/datasets/visda2017.py
  class VisDA2017 (line 11) | class VisDA2017(ImageList):
    method __init__ (line 47) | def __init__(self, root: str, task: str, download: Optional[bool] = Fa...
    method domains (line 59) | def domains(cls):

FILE: tllib/vision/models/digits.py
  class LeNet (line 8) | class LeNet(nn.Sequential):
    method __init__ (line 9) | def __init__(self, num_classes=10):
    method copy_head (line 26) | def copy_head(self):
  class DTN (line 30) | class DTN(nn.Sequential):
    method __init__ (line 31) | def __init__(self, num_classes=10):
    method copy_head (line 54) | def copy_head(self):
  function lenet (line 59) | def lenet(pretrained=False, **kwargs):
  function dtn (line 73) | def dtn(pretrained=False, **kwargs):

FILE: tllib/vision/models/keypoint_detection/loss.py
  class JointsMSELoss (line 10) | class JointsMSELoss(nn.Module):
    method __init__ (line 33) | def __init__(self, reduction='mean'):
    method forward (line 38) | def forward(self, output, target, target_weight=None):
  class JointsKLLoss (line 51) | class JointsKLLoss(nn.Module):
    method __init__ (line 75) | def __init__(self, reduction='mean', epsilon=0.):
    method forward (line 81) | def forward(self, output, target, target_weight=None):

FILE: tllib/vision/models/keypoint_detection/pose_resnet.py
  class Upsampling (line 10) | class Upsampling(nn.Sequential):
    method __init__ (line 14) | def __init__(self, in_channel=2048, hidden_dims=(256, 256, 256), kerne...
  class PoseResNet (line 58) | class PoseResNet(nn.Module):
    method __init__ (line 69) | def __init__(self, backbone, upsampling, feature_dim, num_keypoints, f...
    method forward (line 79) | def forward(self, x):
    method get_parameters (line 85) | def get_parameters(self, lr=1.):
  function _pose_resnet (line 93) | def _pose_resnet(arch, num_keypoints, block, layers, pretrained_backbone...
  function pose_resnet101 (line 100) | def pose_resnet101(num_keypoints, pretrained_backbone=True, deconv_with_...

FILE: tllib/vision/models/object_detection/backbone/mmdetection/vgg.py
  function conv3x3 (line 8) | def conv3x3(in_planes, out_planes, dilation=1):
  function make_vgg_layer (line 18) | def make_vgg_layer(inplanes,
  class VGG (line 36) | class VGG(nn.Module):
    method __init__ (line 59) | def __init__(self,
    method _freeze_backbone (line 128) | def _freeze_backbone(self, freeze_at):
    method init_weights (line 140) | def init_weights(self, pretrained=None):
    method forward (line 154) | def forward(self, x):

FILE: tllib/vision/models/object_detection/backbone/mmdetection/weight_init.py
  function constant_init (line 8) | def constant_init(module, val, bias=0):
  function xavier_init (line 14) | def xavier_init(module, gain=1, bias=0, distribution='normal'):
  function normal_init (line 24) | def normal_init(module, mean=0, std=1, bias=0):
  function uniform_init (line 30) | def uniform_init(module, a=0, b=1, bias=0):
  function kaiming_init (line 36) | def kaiming_init(module,
  function caffe2_xavier_init (line 53) | def caffe2_xavier_init(module, bias=0):

FILE: tllib/vision/models/object_detection/backbone/vgg.py
  class FPN (line 9) | class FPN(nn.Module):
    method __init__ (line 16) | def __init__(
    method forward (line 45) | def forward(self, x):
  class LastLevelMaxPool (line 79) | class LastLevelMaxPool(nn.Module):
    method forward (line 80) | def forward(self, x):
  class LastLevelP6P7 (line 84) | class LastLevelP6P7(nn.Module):
    method __init__ (line 88) | def __init__(self, in_channels, out_channels):
    method forward (line 97) | def forward(self, c5, p5):
  class _NewEmptyTensorOp (line 104) | class _NewEmptyTensorOp(torch.autograd.Function):
    method forward (line 106) | def forward(ctx, x, new_shape):
    method backward (line 111) | def backward(ctx, grad):
  class Conv2d (line 116) | class Conv2d(torch.nn.Conv2d):
    method forward (line 117) | def forward(self, x):
  function conv_with_kaiming_uniform (line 132) | def conv_with_kaiming_uniform():
  class VGGFPN (line 157) | class VGGFPN(Backbone):
    method __init__ (line 158) | def __init__(self, body, fpn):
    method forward (line 170) | def forward(self, x):
  function build_vgg_fpn_backbone (line 180) | def build_vgg_fpn_backbone(cfg, input_shape):

FILE: tllib/vision/models/object_detection/meta_arch/rcnn.py
  class TLGeneralizedRCNN (line 12) | class TLGeneralizedRCNN(GeneralizedRCNNBase):
    method __init__ (line 55) | def __init__(self, *args, finetune=False, **kwargs):
    method forward (line 59) | def forward(self, batched_inputs: Tuple[Dict[str, torch.Tensor]], labe...
    method get_parameters (line 91) | def get_parameters(self, lr=1.):

FILE: tllib/vision/models/object_detection/meta_arch/retinanet.py
  class TLRetinaNet (line 13) | class TLRetinaNet(RetinaNetBase):
    method __init__ (line 82) | def __init__(self, *args, finetune=False, **kwargs):
    method forward (line 86) | def forward(self, batched_inputs: Tuple[Dict[str, Tensor]], labeled=Tr...
    method get_parameters (line 118) | def get_parameters(self, lr=1.):

FILE: tllib/vision/models/object_detection/proposal_generator/rpn.py
  class TLRPN (line 16) | class TLRPN(RPN):
    method __init__ (line 63) | def __init__(self, *args, **kwargs):
    method forward (line 66) | def forward(

FILE: tllib/vision/models/object_detection/roi_heads/roi_heads.py
  class TLRes5ROIHeads (line 17) | class TLRes5ROIHeads(Res5ROIHeads):
    method __init__ (line 60) | def __init__(self, *args, **kwargs):
    method forward (line 63) | def forward(self, images, features, proposals, targets=None, labeled=T...
    method sample_unlabeled_proposals (line 108) | def sample_unlabeled_proposals(
  class TLStandardROIHeads (line 127) | class TLStandardROIHeads(StandardROIHeads):
    method __init__ (line 177) | def __init__(self, *args, **kwargs):
    method forward (line 180) | def forward(self, images, features, proposals, targets=None, labeled=T...
    method _forward_box (line 208) | def _forward_box(self, features: Dict[str, torch.Tensor], proposals: L...
    method sample_unlabeled_proposals (line 250) | def sample_unlabeled_proposals(

FILE: tllib/vision/models/reid/identifier.py
  class ReIdentifier (line 11) | class ReIdentifier(nn.Module):
    method __init__ (line 20) | def __init__(self, backbone: nn.Module, num_classes: int, bottleneck: ...
    method features_dim (line 54) | def features_dim(self) -> int:
    method forward (line 58) | def forward(self, x: torch.Tensor):
    method get_parameters (line 67) | def get_parameters(self, base_lr=1.0, rate=0.1) -> List[Dict]:

FILE: tllib/vision/models/reid/loss.py
  function pairwise_euclidean_distance (line 11) | def pairwise_euclidean_distance(x, y):
  function hard_examples_mining (line 22) | def hard_examples_mining(dist_mat, identity_mat, return_idxes=False):
  class CrossEntropyLossWithLabelSmooth (line 52) | class CrossEntropyLossWithLabelSmooth(nn.Module):
    method __init__ (line 77) | def __init__(self, num_classes, epsilon=0.1):
    method forward (line 83) | def forward(self, y, labels):
  class TripletLoss (line 91) | class TripletLoss(nn.Module):
    method __init__ (line 101) | def __init__(self, margin, normalize_feature=False):
    method forward (line 107) | def forward(self, f, labels):
  class TripletLossXBM (line 122) | class TripletLossXBM(nn.Module):
    method __init__ (line 145) | def __init__(self, margin=0.3, normalize_feature=False):
    method forward (line 151) | def forward(self, f, labels, xbm_f, xbm_labels):
  class SoftTripletLoss (line 171) | class SoftTripletLoss(nn.Module):
    method __init__ (line 196) | def __init__(self, margin=None, normalize_feature=False):
    method forward (line 201) | def forward(self, features_1, features_2, labels):
  class CrossEntropyLoss (line 231) | class CrossEntropyLoss(nn.Module):
    method __init__ (line 248) | def __init__(self):
    method forward (line 252) | def forward(self, y, labels):

FILE: tllib/vision/models/reid/resnet.py
  class ReidResNet (line 10) | class ReidResNet(ResNet):
    method __init__ (line 17) | def __init__(self, *args, **kwargs):
    method forward (line 22) | def forward(self, x):
  function _reid_resnet (line 36) | def _reid_resnet(arch, block, layers, pretrained, progress, **kwargs):
  function reid_resnet18 (line 48) | def reid_resnet18(pretrained=False, progress=True, **kwargs):
  function reid_resnet34 (line 59) | def reid_resnet34(pretrained=False, progress=True, **kwargs):
  function reid_resnet50 (line 70) | def reid_resnet50(pretrained=False, progress=True, **kwargs):
  function reid_resnet101 (line 81) | def reid_resnet101(pretrained=False, progress=True, **kwargs):

FILE: tllib/vision/models/resnet.py
  class ResNet (line 18) | class ResNet(models.ResNet):
    method __init__ (line 21) | def __init__(self, *args, **kwargs):
    method forward (line 25) | def forward(self, x):
    method out_features (line 43) | def out_features(self) -> int:
    method copy_head (line 47) | def copy_head(self) -> nn.Module:
  function _resnet (line 52) | def _resnet(arch, block, layers, pretrained, progress, **kwargs):
  function resnet18 (line 64) | def resnet18(pretrained=False, progress=True, **kwargs):
  function resnet34 (line 76) | def resnet34(pretrained=False, progress=True, **kwargs):
  function resnet50 (line 88) | def resnet50(pretrained=False, progress=True, **kwargs):
  function resnet101 (line 100) | def resnet101(pretrained=False, progress=True, **kwargs):
  function resnet152 (line 112) | def resnet152(pretrained=False, progress=True, **kwargs):
  function resnext50_32x4d (line 124) | def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
  function resnext101_32x8d (line 138) | def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
  function wide_resnet50_2 (line 152) | def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
  function wide_resnet101_2 (line 170) | def wide_resnet101_2(pretrained=False, progress=True, **kwargs):

FILE: tllib/vision/models/segmentation/deeplabv2.py
  class Bottleneck (line 16) | class Bottleneck(nn.Module):
    method __init__ (line 19) | def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=...
    method forward (line 43) | def forward(self, x):
  class ASPP_V2 (line 66) | class ASPP_V2(nn.Module):
    method __init__ (line 67) | def __init__(self, inplanes, dilation_series, padding_series, num_clas...
    method forward (line 77) | def forward(self, x):
  class ResNet (line 85) | class ResNet(nn.Module):
    method __init__ (line 86) | def __init__(self, block, layers):
    method _make_layer (line 109) | def _make_layer(self, block, planes, blocks, stride=1, dilation=1):
    method forward (line 125) | def forward(self, x):
  class Deeplab (line 137) | class Deeplab(nn.Module):
    method __init__ (line 138) | def __init__(self, backbone, classifier, num_classes):
    method forward (line 144) | def forward(self, x):
    method get_1x_lr_params_NOscale (line 149) | def get_1x_lr_params_NOscale(self):
    method get_10x_lr_params (line 164) | def get_10x_lr_params(self):
    method get_parameters (line 172) | def get_parameters(self, lr=1.):
  function deeplabv2_resnet101 (line 179) | def deeplabv2_resnet101(num_classes=19, pretrained_backbone=True):

FILE: tllib/vision/transforms/__init__.py
  class ResizeImage (line 9) | class ResizeImage(object):
    method __init__ (line 18) | def __init__(self, size):
    method __call__ (line 24) | def __call__(self, img):
    method __repr__ (line 28) | def __repr__(self):
  class MultipleApply (line 32) | class MultipleApply:
    method __init__ (line 51) | def __init__(self, transforms):
    method __call__ (line 54) | def __call__(self, image):
    method __repr__ (line 57) | def __repr__(self):
  class Denormalize (line 66) | class Denormalize(Normalize):
    method __init__ (line 82) | def __init__(self, mean, std):
  class NormalizeAndTranspose (line 88) | class NormalizeAndTranspose:
    method __init__ (line 94) | def __init__(self, mean=(104.00698793, 116.66876762, 122.67891434)):
    method __call__ (line 97) | def __call__(self, image):
  class DeNormalizeAndTranspose (line 116) | class DeNormalizeAndTranspose:
    method __init__ (line 122) | def __init__(self, mean=(104.00698793, 116.66876762, 122.67891434)):
    method __call__ (line 125) | def __call__(self, image):
  class RandomErasing (line 134) | class RandomErasing(object):
    method __init__ (line 147) | def __init__(self, probability=0.5, sl=0.02, sh=0.4, r1=0.3, mean=(0.4...
    method __call__ (line 154) | def __call__(self, img):
    method __repr__ (line 181) | def __repr__(self):

FILE: tllib/vision/transforms/keypoint_detection.py
  function wrapper (line 17) | def wrapper(transform: ClassVar):
  function resize (line 39) | def resize(image: Image.Image, size: int, interpolation=Image.BILINEAR,
  function crop (line 55) | def crop(image: Image.Image, top, left, height, width, keypoint2d: np.nd...
  function resized_crop (line 63) | def resized_crop(img, top, left, height, width, size, interpolation=Imag...
  function center_crop (line 87) | def center_crop(image, output_size, keypoint2d: np.ndarray):
  function hflip (line 105) | def hflip(image: Image.Image, keypoint2d: np.ndarray):
  function rotate (line 113) | def rotate(image: Image.Image, angle, keypoint2d: np.ndarray):
  function resize_pad (line 131) | def resize_pad(img, keypoint2d, size, interpolation=Image.BILINEAR):
  class Compose (line 158) | class Compose(object):
    method __init__ (line 165) | def __init__(self, transforms):
    method __call__ (line 168) | def __call__(self, image, **kwargs):
  class GaussianBlur (line 174) | class GaussianBlur(object):
    method __init__ (line 175) | def __init__(self, low=0, high=0.8):
    method __call__ (line 179) | def __call__(self, image: Image, **kwargs):
  class Resize (line 185) | class Resize(object):
    method __init__ (line 189) | def __init__(self, size, interpolation=Image.BILINEAR):
    method __call__ (line 194) | def __call__(self, image, keypoint2d: np.ndarray, intrinsic_matrix: np...
  class ResizePad (line 202) | class ResizePad(object):
    method __init__ (line 205) | def __init__(self, size, interpolation=Image.BILINEAR):
    method __call__ (line 209) | def __call__(self, img, keypoint2d, **kwargs):
  class CenterCrop (line 215) | class CenterCrop(object):
    method __init__ (line 219) | def __init__(self, size):
    method __call__ (line 225) | def __call__(self, image, keypoint2d, **kwargs):
  class RandomRotation (line 240) | class RandomRotation(object):
    method __init__ (line 249) | def __init__(self, degrees):
    method get_params (line 261) | def get_params(degrees):
    method __call__ (line 271) | def __call__(self, image, keypoint2d, **kwargs):
  class RandomResizedCrop (line 289) | class RandomResizedCrop(object):
    method __init__ (line 304) | def __init__(self, size, scale=(0.6, 1.3), interpolation=Image.BILINEAR):
    method get_params (line 313) | def get_params(img, scale):
    method __call__ (line 342) | def __call__(self, image, keypoint2d: np.ndarray, intrinsic_matrix: np...
  class RandomApply (line 358) | class RandomApply(T.RandomTransforms):
    method __init__ (line 366) | def __init__(self, transforms, p=0.5):
    method __call__ (line 370) | def __call__(self, image, **kwargs):

FILE: tllib/vision/transforms/segmentation.py
  function wrapper (line 17) | def wrapper(transform: ClassVar):
  class Compose (line 42) | class Compose:
    method __init__ (line 54) | def __init__(self, transforms):
    method __call__ (line 58) | def __call__(self, image, target):
  class Resize (line 64) | class Resize(nn.Module):
    method __init__ (line 75) | def __init__(self, image_size, label_size=None):
    method forward (line 83) | def forward(self, image, label):
  class RandomCrop (line 98) | class RandomCrop(nn.Module):
    method __init__ (line 105) | def __init__(self, size):
    method forward (line 109) | def forward(self, image, label):
  class RandomHorizontalFlip (line 132) | class RandomHorizontalFlip(nn.Module):
    method __init__ (line 139) | def __init__(self, p=0.5):
    method forward (line 143) | def forward(self, image, label):
  class RandomResizedCrop (line 157) | class RandomResizedCrop(T.RandomResizedCrop):
    method __init__ (line 174) | def __init__(self, size, scale=(0.5, 1.0), ratio=(3. / 4., 4. / 3.), i...
    method get_params (line 178) | def get_params(
    method forward (line 222) | def forward(self, image, label):
  class RandomChoice (line 239) | class RandomChoice(T.RandomTransforms):
    method __call__ (line 242) | def __call__(self, image, label):
  class RandomApply (line 247) | class RandomApply(T.RandomTransforms):
    method __init__ (line 255) | def __init__(self, transforms, p=0.5):
    method __call__ (line 259) | def __call__(self, image, label):
Condensed preview — 445 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (2,819K chars).
[
  {
    "path": ".github/ISSUE_TEMPLATE/bug_report.md",
    "chars": 834,
    "preview": "---\nname: Bug report\nabout: Create a report to help us improve\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Describe the b"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/custom.md",
    "chars": 126,
    "preview": "---\nname: Custom issue template\nabout: Describe this issue template's purpose here.\ntitle: ''\nlabels: ''\nassignees: ''\n\n"
  },
  {
    "path": ".github/ISSUE_TEMPLATE/feature_request.md",
    "chars": 595,
    "preview": "---\nname: Feature request\nabout: Suggest an idea for this project\ntitle: ''\nlabels: ''\nassignees: ''\n\n---\n\n**Is your fea"
  },
  {
    "path": ".gitignore",
    "chars": 1882,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
  },
  {
    "path": "CONTRIBUTING.md",
    "chars": 518,
    "preview": "## Contributing to Transfer-Learning-Library\n\nAll kinds of contributions are welcome, including but not limited to the f"
  },
  {
    "path": "DATASETS.md",
    "chars": 2897,
    "preview": "## Notice (2023-08-01)\n\n### Transfer-Learning-Library Dataset Link Failure Issue\nDear users,\n\nWe sincerely apologize to "
  },
  {
    "path": "LICENSE",
    "chars": 1073,
    "preview": "Copyright (c) 2018 The Python Packaging Authority\n\nPermission is hereby granted, free of charge, to any person obtaining"
  },
  {
    "path": "README.md",
    "chars": 18886,
    "preview": "<div align='center' margin-bottom:40px> <img src=\"logo.png\" width=200/> </div>\n\n# Transfer Learning Library\n\n- [Introduc"
  },
  {
    "path": "docs/Makefile",
    "chars": 615,
    "preview": "# Minimal makefile for Sphinx documentation\n#\n\n# You can set these variables from the command line.\nSPHINXOPTS    =\nSPHI"
  },
  {
    "path": "docs/conf.py",
    "chars": 9100,
    "preview": "import sys\nimport os\n\nsys.path.append(os.path.abspath('..'))\nsys.path.append(os.path.abspath('./demo/'))\n\nfrom pytorch_s"
  },
  {
    "path": "docs/index.rst",
    "chars": 475,
    "preview": "=====================================\nTransfer Learning\n=====================================\n\n.. toctree::\n    :maxdept"
  },
  {
    "path": "docs/make.bat",
    "chars": 824,
    "preview": "@ECHO OFF\n\npushd %~dp0\n\nREM Command file for Sphinx documentation\n\nif \"%SPHINXBUILD%\" == \"\" (\n\tset SPHINXBUILD=python -m"
  },
  {
    "path": "docs/requirements.txt",
    "chars": 32,
    "preview": "sphinxcontrib-httpdomain\nsphinx\n"
  },
  {
    "path": "docs/tllib/alignment/domain_adversarial.rst",
    "chars": 2130,
    "preview": "==========================================\nDomain Adversarial Training\n==========================================\n\n\n.. _"
  },
  {
    "path": "docs/tllib/alignment/hypothesis_adversarial.rst",
    "chars": 1170,
    "preview": "==========================================\nHypothesis Adversarial Learning\n==========================================\n\n\n"
  },
  {
    "path": "docs/tllib/alignment/index.rst",
    "chars": 249,
    "preview": "=====================================\nFeature Alignment\n=====================================\n\n.. toctree::\n    :maxdept"
  },
  {
    "path": "docs/tllib/alignment/statistics_matching.rst",
    "chars": 561,
    "preview": "=====================\nStatistics Matching\n=====================\n\n\n.. _DAN:\n\nDAN: Deep Adaptation Network\n---------------"
  },
  {
    "path": "docs/tllib/modules.rst",
    "chars": 858,
    "preview": "=====================\nModules\n=====================\n\n\nClassifier\n-------------------------------\n.. autoclass:: tllib.mo"
  },
  {
    "path": "docs/tllib/normalization.rst",
    "chars": 1224,
    "preview": "=====================\nNormalization\n=====================\n\n\n\n.. _AFN:\n\nAFN: Adaptive Feature Norm\n----------------------"
  },
  {
    "path": "docs/tllib/ranking.rst",
    "chars": 641,
    "preview": "=====================\nRanking\n=====================\n\n\n\n.. _H_score:\n\nH-score\n-------------------------------------------"
  },
  {
    "path": "docs/tllib/regularization.rst",
    "chars": 1268,
    "preview": "===========================================\nRegularization\n===========================================\n\n.. _L2:\n\nL2\n----"
  },
  {
    "path": "docs/tllib/reweight.rst",
    "chars": 793,
    "preview": "=======================================\nRe-weighting\n=======================================\n\n\n.. _PADA:\n\nPADA: Partial "
  },
  {
    "path": "docs/tllib/self_training.rst",
    "chars": 3800,
    "preview": "=======================================\nSelf Training Methods\n=======================================\n\n\n.. _PseudoLabel:"
  },
  {
    "path": "docs/tllib/translation.rst",
    "chars": 2405,
    "preview": "=======================================\nDomain Translation\n=======================================\n\n\n.. _CycleGAN:\n\n----"
  },
  {
    "path": "docs/tllib/utils/analysis.rst",
    "chars": 221,
    "preview": "==============\nAnalysis Tools\n==============\n\n\n.. autofunction:: tllib.utils.analysis.collect_feature\n\n\n.. autofunction:"
  },
  {
    "path": "docs/tllib/utils/base.rst",
    "chars": 754,
    "preview": "Generic Tools\n==============\n\n\nAverage Meter\n---------------------------------\n\n.. autoclass:: tllib.utils.meter.Average"
  },
  {
    "path": "docs/tllib/utils/index.rst",
    "chars": 191,
    "preview": "=====================================\nUtilities\n=====================================\n\n.. toctree::\n    :maxdepth: 2\n   "
  },
  {
    "path": "docs/tllib/utils/metric.rst",
    "chars": 302,
    "preview": "===========\nMetrics\n===========\n\nClassification & Segmentation\n==============================\n\n\nAccuracy\n---------------"
  },
  {
    "path": "docs/tllib/vision/datasets.rst",
    "chars": 9127,
    "preview": "Datasets\n=============================\n\nCross-Domain Classification\n----------------------------------------------------"
  },
  {
    "path": "docs/tllib/vision/index.rst",
    "chars": 191,
    "preview": "=====================================\nVision\n=====================================\n\n.. toctree::\n    :maxdepth: 2\n    :c"
  },
  {
    "path": "docs/tllib/vision/models.rst",
    "chars": 2211,
    "preview": "Models\n===========================\n\n------------------------------\nImage Classification\n------------------------------\n\n"
  },
  {
    "path": "docs/tllib/vision/transforms.rst",
    "chars": 392,
    "preview": "Transforms\n=============================\n\n\nClassification\n---------------------------------\n\n.. automodule:: tllib.visio"
  },
  {
    "path": "examples/domain_adaptation/image_classification/README.md",
    "chars": 16847,
    "preview": "# Unsupervised Domain Adaptation for Image Classification\n\n## Installation\n\nIt’s suggested to use **pytorch==1.7.1** and"
  },
  {
    "path": "examples/domain_adaptation/image_classification/adda.py",
    "chars": 15372,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\nNote: Our implementation is different from ADDA paper in seve"
  },
  {
    "path": "examples/domain_adaptation/image_classification/adda.sh",
    "chars": 10406,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python adda.py data/office31 -d Office31 "
  },
  {
    "path": "examples/domain_adaptation/image_classification/afn.py",
    "chars": 12741,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/image_classification/afn.sh",
    "chars": 10910,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/bsp.py",
    "chars": 15172,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/image_classification/bsp.sh",
    "chars": 10379,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python bsp.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/cc_loss.py",
    "chars": 14531,
    "preview": "\"\"\"\n@author: Ying Jin\n@contact: sherryying003@gmail.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse\nim"
  },
  {
    "path": "examples/domain_adaptation/image_classification/cc_loss.sh",
    "chars": 12189,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python cc_loss.py data/office31 -d Office"
  },
  {
    "path": "examples/domain_adaptation/image_classification/cdan.py",
    "chars": 13775,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/cdan.sh",
    "chars": 10696,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python cdan.py data/office31 -d Office31 "
  },
  {
    "path": "examples/domain_adaptation/image_classification/dan.py",
    "chars": 12660,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/dan.sh",
    "chars": 10370,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python dan.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/dann.py",
    "chars": 12804,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/dann.sh",
    "chars": 10394,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 "
  },
  {
    "path": "examples/domain_adaptation/image_classification/erm.py",
    "chars": 9855,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport warnings\nimport argparse\nim"
  },
  {
    "path": "examples/domain_adaptation/image_classification/erm.sh",
    "chars": 11086,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python erm.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/fixmatch.py",
    "chars": 15987,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/image_classification/fixmatch.sh",
    "chars": 3751,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python fixmatch.py data/office31 -d Offic"
  },
  {
    "path": "examples/domain_adaptation/image_classification/jan.py",
    "chars": 13357,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/jan.sh",
    "chars": 10264,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python jan.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mcc.py",
    "chars": 12547,
    "preview": "\"\"\"\n@author: Ying Jin\n@contact: sherryying003@gmail.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse\nim"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mcc.sh",
    "chars": 11725,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python mcc.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mcd.py",
    "chars": 15479,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mcd.sh",
    "chars": 9839,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\n# We found MCD loss is sensitive to class number,\n# thus, when t"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mdd.py",
    "chars": 12734,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_classification/mdd.sh",
    "chars": 11382,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python mdd.py data/office31 -d Office31 -"
  },
  {
    "path": "examples/domain_adaptation/image_classification/requirements.txt",
    "chars": 4,
    "preview": "timm"
  },
  {
    "path": "examples/domain_adaptation/image_classification/self_ensemble.py",
    "chars": 14186,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/image_classification/self_ensemble.sh",
    "chars": 5438,
    "preview": "#!/usr/bin/env bash\n# ResNet50, Office31, Single Source\nCUDA_VISIBLE_DEVICES=0 python self_ensemble.py data/office31 -d "
  },
  {
    "path": "examples/domain_adaptation/image_classification/utils.py",
    "chars": 9710,
    "preview": "\"\"\"\n@author: Junguang Jiang, Baixu Chen\n@contact: JiangJunguang1123@outlook.com, cbx_99_hasta@outlook.com\n\"\"\"\nimport sys"
  },
  {
    "path": "examples/domain_adaptation/image_regression/README.md",
    "chars": 2895,
    "preview": "# Unsupervised Domain Adaptation for Image Regression Tasks\nIt’s suggested to use **pytorch==1.7.1** and torchvision==0."
  },
  {
    "path": "examples/domain_adaptation/image_regression/dann.py",
    "chars": 12280,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_regression/dann.sh",
    "chars": 1722,
    "preview": "# DSprites\nCUDA_VISIBLE_DEVICES=0 python dann.py data/dSprites -d DSprites -s C -t N -a resnet18 --epochs 40 --seed 0 --"
  },
  {
    "path": "examples/domain_adaptation/image_regression/dd.py",
    "chars": 14172,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_regression/dd.sh",
    "chars": 2064,
    "preview": "# DSprites\nCUDA_VISIBLE_DEVICES=0 python dd.py data/dSprites -d DSprites -s C -t N -a resnet18 --epochs 40 --seed 0 -b 1"
  },
  {
    "path": "examples/domain_adaptation/image_regression/erm.py",
    "chars": 11093,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_regression/erm.sh",
    "chars": 1668,
    "preview": "# DSprites\nCUDA_VISIBLE_DEVICES=0 python erm.py data/dSprites -d DSprites -s C -t N -a resnet18 --epochs 20 --seed 0 -b "
  },
  {
    "path": "examples/domain_adaptation/image_regression/rsd.py",
    "chars": 11804,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/image_regression/rsd.sh",
    "chars": 1698,
    "preview": "# DSprites\nCUDA_VISIBLE_DEVICES=0 python rsd.py data/dSprites -d DSprites -s C -t N -a resnet18 --epochs 40 --seed 0 --l"
  },
  {
    "path": "examples/domain_adaptation/image_regression/utils.py",
    "chars": 2160,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport sys\nimport time\nimport torch\nimport torch"
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/README.md",
    "chars": 3720,
    "preview": "# Unsupervised Domain Adaptation for Keypoint Detection\nIt’s suggested to use **pytorch==1.7.1** and torchvision==0.8.2 "
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/erm.py",
    "chars": 13960,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/erm.sh",
    "chars": 961,
    "preview": "# Source Only\n# Hands Dataset\nCUDA_VISIBLE_DEVICES=0 python erm.py data/RHD data/H3D_crop \\\n    -s RenderedHandPose -t H"
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/regda.py",
    "chars": 22989,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/regda.sh",
    "chars": 662,
    "preview": "# Hands Dataset\nCUDA_VISIBLE_DEVICES=0 python regda.py data/RHD data/H3D_crop \\\n    -s RenderedHandPose -t Hand3DStudio "
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/regda_fast.py",
    "chars": 22958,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/keypoint_detection/regda_fast.sh",
    "chars": 906,
    "preview": "# regda_fast is provided by https://github.com/YouJiacheng?tab=repositories\n# On single V100(16G), overall adversarial t"
  },
  {
    "path": "examples/domain_adaptation/object_detection/README.md",
    "chars": 13217,
    "preview": "# Unsupervised Domain Adaptation for Object Detection\n\n## Updates\n- *04/2022*: Provide CycleGAN translated datasets.\n\n\n#"
  },
  {
    "path": "examples/domain_adaptation/object_detection/config/faster_rcnn_R_101_C4_cityscapes.yaml",
    "chars": 802,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"TLGeneralizedRCNN\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  MASK_ON: F"
  },
  {
    "path": "examples/domain_adaptation/object_detection/config/faster_rcnn_R_101_C4_voc.yaml",
    "chars": 824,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"TLGeneralizedRCNN\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  MASK_ON: F"
  },
  {
    "path": "examples/domain_adaptation/object_detection/config/faster_rcnn_vgg_16_cityscapes.yaml",
    "chars": 1562,
    "preview": "MODEL:\r\n  META_ARCHITECTURE: \"TLGeneralizedRCNN\"\r\n  WEIGHTS: 'https://open-mmlab.oss-cn-beijing.aliyuncs.com/pretrain/vg"
  },
  {
    "path": "examples/domain_adaptation/object_detection/config/retinanet_R_101_FPN_voc.yaml",
    "chars": 975,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"TLRetinaNet\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  BACKBONE:\n    NA"
  },
  {
    "path": "examples/domain_adaptation/object_detection/cycle_gan.py",
    "chars": 19957,
    "preview": "\"\"\"\nCycleGAN for VOC-format Object Detection Dataset\nYou need to modify function build_dataset if you want to use your o"
  },
  {
    "path": "examples/domain_adaptation/object_detection/cycle_gan.sh",
    "chars": 6240,
    "preview": "# VOC to Clipart\nmkdir datasets/VOC2007_to_clipart\ncp -r datasets/VOC2007/* datasets/VOC2007_to_clipart\nmkdir datasets/V"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/README.md",
    "chars": 1783,
    "preview": "# Decoupled Adaptation for Cross-Domain Object Detection\n\n## Installation\nOur code is based on \n- [Detectron latest(v0.6"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/bbox_adaptation.py",
    "chars": 24465,
    "preview": "\"\"\"\nTraining a bounding box adaptor\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nim"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/category_adaptation.py",
    "chars": 19754,
    "preview": "\"\"\"\nTraining a category adaptor\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/config/faster_rcnn_R_101_C4_cityscapes.yaml",
    "chars": 862,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"DecoupledGeneralizedRCNN\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  MAS"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/config/faster_rcnn_R_101_C4_voc.yaml",
    "chars": 886,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"DecoupledGeneralizedRCNN\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  MAS"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/config/faster_rcnn_vgg_16_cityscapes.yaml",
    "chars": 1401,
    "preview": "MODEL:\r\n  META_ARCHITECTURE: \"DecoupledGeneralizedRCNN\"\r\n  WEIGHTS: 'https://open-mmlab.oss-cn-beijing.aliyuncs.com/pret"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/config/retinanet_R_101_FPN_voc.yaml",
    "chars": 887,
    "preview": "MODEL:\n  META_ARCHITECTURE: \"DecoupledRetinaNet\"\n  WEIGHTS: \"detectron2://ImageNetPretrained/MSRA/R-101.pkl\"\n  BACKBONE:"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/d_adapt.py",
    "chars": 15523,
    "preview": "\"\"\"\n`D-adapt: Decoupled Adaptation for Cross-Domain Object Detection <https://openreview.net/pdf?id=VNqaB1g9393>`_.\n@aut"
  },
  {
    "path": "examples/domain_adaptation/object_detection/d_adapt/d_adapt.sh",
    "chars": 9007,
    "preview": "# ResNet101 Based Faster RCNN: Faster RCNN: VOC->Clipart\n# 44.8\npretrained_models=../logs/source_only/faster_rcnn_R_101_"
  },
  {
    "path": "examples/domain_adaptation/object_detection/oracle.sh",
    "chars": 2349,
    "preview": "# Faster RCNN: WaterColor\r\nCUDA_VISIBLE_DEVICES=0 python source_only.py \\\r\n  --config-file config/faster_rcnn_R_101_C4_v"
  },
  {
    "path": "examples/domain_adaptation/object_detection/prepare_cityscapes_to_voc.py",
    "chars": 6006,
    "preview": "from pascal_voc_writer import Writer\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport os\r\nimport json\r\nimpor"
  },
  {
    "path": "examples/domain_adaptation/object_detection/requirements.txt",
    "chars": 40,
    "preview": "mmcv\ntimm\nprettytable\npascal_voc_writer\n"
  },
  {
    "path": "examples/domain_adaptation/object_detection/source_only.py",
    "chars": 7513,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport logging\nimport os\nimport argparse\nimport "
  },
  {
    "path": "examples/domain_adaptation/object_detection/source_only.sh",
    "chars": 3760,
    "preview": "# Faster RCNN: VOC->Clipart\nCUDA_VISIBLE_DEVICES=0 python source_only.py \\\n  --config-file config/faster_rcnn_R_101_C4_v"
  },
  {
    "path": "examples/domain_adaptation/object_detection/utils.py",
    "chars": 15808,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport numpy as np\nimport os\nimport prettytable\n"
  },
  {
    "path": "examples/domain_adaptation/object_detection/visualize.py",
    "chars": 5963,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport os\nimport argparse\nimport sys\n\nimport torch\nfrom d"
  },
  {
    "path": "examples/domain_adaptation/object_detection/visualize.sh",
    "chars": 704,
    "preview": "# Source Only Faster RCNN: VOC->Clipart\nCUDA_VISIBLE_DEVICES=0 python visualize.py --config-file config/faster_rcnn_R_10"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/README.md",
    "chars": 3922,
    "preview": "# Open-set Domain Adaptation for Image Classification\n\n## Installation\nIt’s suggested to use **pytorch==1.7.1** and torc"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/dann.py",
    "chars": 13362,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/dann.sh",
    "chars": 2936,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --e"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/erm.py",
    "chars": 11724,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/erm.sh",
    "chars": 2809,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python erm.py data/office31 -d Office31 -s A -t W -a resnet50 --ep"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/osbp.py",
    "chars": 12402,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/osbp.sh",
    "chars": 2847,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python osbp.py data/office31 -d Office31 -s A -t W -a resnet50 --e"
  },
  {
    "path": "examples/domain_adaptation/openset_domain_adaptation/utils.py",
    "chars": 5585,
    "preview": "import sys\nimport timm\nimport torch.nn as nn\nimport torchvision.transforms as T\n\nsys.path.append('../../..')\nimport tlli"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/README.md",
    "chars": 5452,
    "preview": "# Partial Domain Adaptation for Image Classification\n\n## Installation\nIt’s suggested to use **pytorch==1.7.1** and torch"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/afn.py",
    "chars": 12049,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport sys\nimpo"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/afn.sh",
    "chars": 2962,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python afn.py data/office31 -d Office31 -s A -t W -a resnet50 --tr"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/dann.py",
    "chars": 11715,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/dann.sh",
    "chars": 3155,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python dann.py data/office31 -d Office31 -s A -t W -a resnet50 --e"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/erm.py",
    "chars": 9947,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/erm.sh",
    "chars": 3175,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python erm.py data/office31 -d Office31 -s A -t W -a resnet50 --ep"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/iwan.py",
    "chars": 13712,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport sys\nimpo"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/iwan.sh",
    "chars": 3152,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python iwan.py data/office31 -d Office31 -s A -t W -a resnet50 --l"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/pada.py",
    "chars": 13192,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/pada.sh",
    "chars": 3526,
    "preview": "#!/usr/bin/env bash\n# Office31\nCUDA_VISIBLE_DEVICES=0 python pada.py data/office31 -d Office31 -s A -t W -a resnet50 --e"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/requirements.txt",
    "chars": 4,
    "preview": "timm"
  },
  {
    "path": "examples/domain_adaptation/partial_domain_adaptation/utils.py",
    "chars": 6964,
    "preview": "import sys\nimport time\nimport timm\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torchvision"
  },
  {
    "path": "examples/domain_adaptation/re_identification/README.md",
    "chars": 4803,
    "preview": "# Unsupervised Domain Adaptation for Person Re-Identification\n\n## Installation\n\nIt’s suggested to use **pytorch==1.7.1**"
  },
  {
    "path": "examples/domain_adaptation/re_identification/baseline.py",
    "chars": 13354,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/re_identification/baseline.sh",
    "chars": 1245,
    "preview": "#!/usr/bin/env bash\n# Market1501 -> Duke\nCUDA_VISIBLE_DEVICES=0 python baseline.py data data -s Market1501 -t DukeMTMC -"
  },
  {
    "path": "examples/domain_adaptation/re_identification/baseline_cluster.py",
    "chars": 16745,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/re_identification/baseline_cluster.sh",
    "chars": 3221,
    "preview": "#!/usr/bin/env bash\n# Market1501 -> Duke\n# step1: pretrain\nCUDA_VISIBLE_DEVICES=0 python baseline.py data data -s Market"
  },
  {
    "path": "examples/domain_adaptation/re_identification/ibn.sh",
    "chars": 2303,
    "preview": "#!/usr/bin/env bash\n# Market1501 -> Duke\nCUDA_VISIBLE_DEVICES=0 python baseline.py data data -s Market1501 -t DukeMTMC -"
  },
  {
    "path": "examples/domain_adaptation/re_identification/mmt.py",
    "chars": 22952,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_adaptation/re_identification/mmt.sh",
    "chars": 4479,
    "preview": "#!/usr/bin/env bash\n# Market1501 -> Duke\n# step1: pretrain\nCUDA_VISIBLE_DEVICES=0 python baseline.py data data -s Market"
  },
  {
    "path": "examples/domain_adaptation/re_identification/requirements.txt",
    "chars": 18,
    "preview": "timm\nopencv-python"
  },
  {
    "path": "examples/domain_adaptation/re_identification/spgan.py",
    "chars": 20080,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport sys\nimpo"
  },
  {
    "path": "examples/domain_adaptation/re_identification/spgan.sh",
    "chars": 2543,
    "preview": "# Market1501 -> Duke\n# step1: train SPGAN\nCUDA_VISIBLE_DEVICES=0 python spgan.py data -s Market1501 -t DukeMTMC \\\n--log "
  },
  {
    "path": "examples/domain_adaptation/re_identification/utils.py",
    "chars": 8343,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport sys\nimport timm\nimport numpy as np\nimport torch\nim"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/README.md",
    "chars": 8523,
    "preview": "# Unsupervised Domain Adaptation for Semantic Segmentation\nIt’s suggested to use **pytorch==1.7.1** and torchvision==0.8"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/advent.py",
    "chars": 16667,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/advent.sh",
    "chars": 479,
    "preview": "# GTA5 to Cityscapes\nCUDA_VISIBLE_DEVICES=0 python advent.py data/GTA5 data/Cityscapes -s GTA5 -t Cityscapes \\\n    --log"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/cycada.py",
    "chars": 18951,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/cycada.sh",
    "chars": 1637,
    "preview": "# GTA5 to Cityscapes\n# First, train the CycleGAN\nCUDA_VISIBLE_DEVICES=0 python cycada.py data/GTA5 data/Cityscapes -s GT"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/cycle_gan.py",
    "chars": 15437,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/cycle_gan.sh",
    "chars": 977,
    "preview": "# GTA5 to Cityscapes\n# First, train the CycleGAN\nCUDA_VISIBLE_DEVICES=0 python cycle_gan.py data/GTA5 data/Cityscapes -s"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/erm.py",
    "chars": 13619,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/erm.sh",
    "chars": 846,
    "preview": "# Source Only\n# GTA5 to Cityscapes\nCUDA_VISIBLE_DEVICES=0 python erm.py data/GTA5 data/Cityscapes \\\n    -s GTA5 -t Citys"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/fda.py",
    "chars": 17073,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport"
  },
  {
    "path": "examples/domain_adaptation/semantic_segmentation/fda.sh",
    "chars": 496,
    "preview": "# GTA5 to Cityscapes\nCUDA_VISIBLE_DEVICES=0 python fda.py data/GTA5 data/Cityscapes -s GTA5 -t Cityscapes \\\n    --log lo"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/README.md",
    "chars": 5721,
    "preview": "# Unsupervised Domain Adaptation for WILDS (Image Classification)\n\n## Installation\n\nIt’s suggested to use **pytorch==1.9"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/cdan.py",
    "chars": 19839,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/cdan.sh",
    "chars": 509,
    "preview": "CUDA_VISIBLE_DEVICES=0 python cdan.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/dan.py",
    "chars": 18572,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/dan.sh",
    "chars": 509,
    "preview": "CUDA_VISIBLE_DEVICES=0 python dan.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --d"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/dann.py",
    "chars": 19165,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/dann.sh",
    "chars": 509,
    "preview": "CUDA_VISIBLE_DEVICES=0 python dann.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/erm.py",
    "chars": 16671,
    "preview": "\"\"\"\nAdapted from https://github.com/NVIDIA/apex/tree/master/examples\n@author: Junguang Jiang\n@contact: JiangJunguang1123"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/erm.sh",
    "chars": 489,
    "preview": "CUDA_VISIBLE_DEVICES=0 python erm.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --d"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/fixmatch.py",
    "chars": 19943,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport time\nimpor"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/fixmatch.sh",
    "chars": 513,
    "preview": "CUDA_VISIBLE_DEVICES=0 python fixmatch.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/jan.py",
    "chars": 18808,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/jan.sh",
    "chars": 509,
    "preview": "CUDA_VISIBLE_DEVICES=0 python jan.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --d"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/mdd.py",
    "chars": 18444,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/mdd.sh",
    "chars": 509,
    "preview": "CUDA_VISIBLE_DEVICES=0 python mdd.py data/wilds -d \"fmow\" --aa \"v0\" --arch \"densenet121\" \\\n  --lr 0.1 --opt-level O1 --d"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/requirements.txt",
    "chars": 34,
    "preview": "wilds\ntimm\ntensorflow\ntensorboard\n"
  },
  {
    "path": "examples/domain_adaptation/wilds_image_classification/utils.py",
    "chars": 11272,
    "preview": "\"\"\"\n@author: Junguang Jiang\n@contact: JiangJunguang1123@outlook.com\n\"\"\"\nimport time\nimport math\nimport matplotlib.pyplot"
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/README.md",
    "chars": 1125,
    "preview": "# Unsupervised Domain Adaptation for WILDS (Molecule classification)\n\n## Installation\n\nIt's suggested to use **pytorch=="
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/erm.py",
    "chars": 7843,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport argparse\nimport shutil\nimport time\nimport pprint\n\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/erm.sh",
    "chars": 168,
    "preview": "# ogb-molpcba\nCUDA_VISIBLE_DEVICES=0 python erm.py data/wilds --lr 3e-2 -b 4096 4096 --epochs 200 \\\n  --seed 0 --determi"
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/gin.py",
    "chars": 7002,
    "preview": "\"\"\"\nAdapted from \"https://github.com/p-lambda/wilds\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport torch\nfro"
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/requirements.txt",
    "chars": 48,
    "preview": "torch_geometric\nwilds\ntensorflow\ntensorboard\nogb"
  },
  {
    "path": "examples/domain_adaptation/wilds_ogb_molpcba/utils.py",
    "chars": 5132,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport time\nimport sys\n\nimport torch\nimport torch.nn as nn\nfrom t"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/README.md",
    "chars": 2183,
    "preview": "# Unsupervised Domain Adaptation for WILDS (Image Regression)\n\n## Installation\n\nIt's suggested to use **pytorch==1.10.1*"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/erm.py",
    "chars": 13125,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport argparse\nimport os\nimport shutil\nimport time\nimport pprint"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/erm.sh",
    "chars": 1019,
    "preview": "# official split scheme\nCUDA_VISIBLE_DEVICES=0 python erm.py data/wilds --split-scheme official --fold A \\\n  --arch 'res"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/requirements.txt",
    "chars": 28,
    "preview": "wilds\ntensorflow\ntensorboard"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/resnet_ms.py",
    "chars": 1986,
    "preview": "\"\"\"\nModified based on torchvision.models.resnet\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport torch.nn as nn"
  },
  {
    "path": "examples/domain_adaptation/wilds_poverty/utils.py",
    "chars": 8474,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport time\nimport sys\n\nfrom typing import Tuple, Optional, List,"
  },
  {
    "path": "examples/domain_adaptation/wilds_text/README.md",
    "chars": 1564,
    "preview": "# Unsupervised Domain Adaptation for WILDS (Text Classification)\n\n## Installation\n\nIt's suggested to use **pytorch==1.10"
  },
  {
    "path": "examples/domain_adaptation/wilds_text/erm.py",
    "chars": 10321,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport argparse\nimport shutil\nimport time\nimport pprint\n\nimport t"
  },
  {
    "path": "examples/domain_adaptation/wilds_text/erm.sh",
    "chars": 484,
    "preview": "# civilcomments\nCUDA_VISIBLE_DEVICES=0 python erm.py data/wilds -d \"civilcomments\" --unlabeled-list \"extra_unlabeled\" \\\n"
  },
  {
    "path": "examples/domain_adaptation/wilds_text/requirements.txt",
    "chars": 41,
    "preview": "wilds\ntensorflow\ntensorboard\ntransformers"
  },
  {
    "path": "examples/domain_adaptation/wilds_text/utils.py",
    "chars": 6050,
    "preview": "\"\"\"\n@author: Jiaxin Li\n@contact: thulijx@gmail.com\n\"\"\"\nimport time\nimport sys\n\nimport torch\nimport torch.distributed as "
  },
  {
    "path": "examples/domain_generalization/image_classification/README.md",
    "chars": 5887,
    "preview": "# Domain Generalization for Image Classification\n\n## Installation\nIt’s suggested to use **pytorch==1.7.1** and torchvisi"
  },
  {
    "path": "examples/domain_generalization/image_classification/coral.py",
    "chars": 12764,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_generalization/image_classification/coral.sh",
    "chars": 2075,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python coral.py data/PACS -d PACS -s A C S -t P -a resnet50 "
  },
  {
    "path": "examples/domain_generalization/image_classification/erm.py",
    "chars": 10788,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_generalization/image_classification/erm.sh",
    "chars": 2019,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python erm.py data/PACS -d PACS -s A C S -t P -a resnet50 --"
  },
  {
    "path": "examples/domain_generalization/image_classification/groupdro.py",
    "chars": 12516,
    "preview": "\"\"\"\nAdapted from https://github.com/facebookresearch/DomainBed\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\""
  },
  {
    "path": "examples/domain_generalization/image_classification/groupdro.sh",
    "chars": 2165,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python groupdro.py data/PACS -d PACS -s A C S -t P -a resnet"
  },
  {
    "path": "examples/domain_generalization/image_classification/ibn.sh",
    "chars": 2121,
    "preview": "#!/usr/bin/env bash\n# IBN_ResNet50_b, PACS\nCUDA_VISIBLE_DEVICES=0 python erm.py data/PACS -d PACS -s A C S -t P -a resne"
  },
  {
    "path": "examples/domain_generalization/image_classification/irm.py",
    "chars": 14520,
    "preview": "\"\"\"\nAdapted from https://github.com/facebookresearch/DomainBed\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\""
  },
  {
    "path": "examples/domain_generalization/image_classification/irm.sh",
    "chars": 2145,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python irm.py data/PACS -d PACS -s A C S -t P -a resnet50 --"
  },
  {
    "path": "examples/domain_generalization/image_classification/mixstyle.py",
    "chars": 11564,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_generalization/image_classification/mixstyle.sh",
    "chars": 2403,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python mixstyle.py data/PACS -d PACS -s A C S -t P -a resnet"
  },
  {
    "path": "examples/domain_generalization/image_classification/mldg.py",
    "chars": 13746,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_generalization/image_classification/mldg.sh",
    "chars": 2053,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python mldg.py data/PACS -d PACS -s A C S -t P -a resnet50 -"
  },
  {
    "path": "examples/domain_generalization/image_classification/requirements.txt",
    "chars": 17,
    "preview": "timm\nwilds\nhigher"
  },
  {
    "path": "examples/domain_generalization/image_classification/utils.py",
    "chars": 14792,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport copy\nimport random\nimport sys\nimport time\nimport t"
  },
  {
    "path": "examples/domain_generalization/image_classification/vrex.py",
    "chars": 12869,
    "preview": "\"\"\"\n@author: Baixu Chen\n@contact: cbx_99_hasta@outlook.com\n\"\"\"\nimport random\nimport time\nimport warnings\nimport argparse"
  },
  {
    "path": "examples/domain_generalization/image_classification/vrex.sh",
    "chars": 2257,
    "preview": "#!/usr/bin/env bash\n# ResNet50, PACS\nCUDA_VISIBLE_DEVICES=0 python vrex.py data/PACS -d PACS -s A C S -t P -a resnet50 -"
  }
]

// ... and 245 more files (download for full content)

About this extraction

This page contains the full source code of the thuml/Transfer-Learning-Library GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 445 files (2.6 MB), approximately 700.7k tokens, and a symbol index with 1435 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — free GitHub repo to text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!