Full Code of MCG-NJU/AdaMixer for AI

main a50e33d766c6 cached
967 files
12.7 MB
3.4M tokens
2194 symbols
1 request
Copy disabled (too large) Download .txt
Showing preview only (13,538K chars total). Download the full file to get everything.
Repository: MCG-NJU/AdaMixer
Branch: main
Commit: a50e33d766c6
Files: 967
Total size: 12.7 MB

Directory structure:
gitextract_qwmo5r_u/

├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── README_zh-CN.md
├── configs/
│   ├── _base_/
│   │   ├── datasets/
│   │   │   ├── cityscapes_detection.py
│   │   │   ├── cityscapes_instance.py
│   │   │   ├── coco_detection.py
│   │   │   ├── coco_detection_tiny.py
│   │   │   ├── coco_instance.py
│   │   │   ├── coco_instance_semantic.py
│   │   │   ├── deepfashion.py
│   │   │   ├── lvis_v0.5_instance.py
│   │   │   ├── lvis_v1_instance.py
│   │   │   ├── voc0712.py
│   │   │   └── wider_face.py
│   │   ├── default_runtime.py
│   │   ├── models/
│   │   │   ├── cascade_mask_rcnn_r50_fpn.py
│   │   │   ├── cascade_rcnn_r50_fpn.py
│   │   │   ├── fast_rcnn_r50_fpn.py
│   │   │   ├── faster_rcnn_r50_caffe_c4.py
│   │   │   ├── faster_rcnn_r50_caffe_dc5.py
│   │   │   ├── faster_rcnn_r50_fpn.py
│   │   │   ├── mask_rcnn_r50_caffe_c4.py
│   │   │   ├── mask_rcnn_r50_fpn.py
│   │   │   ├── retinanet_r50_fpn.py
│   │   │   ├── rpn_r50_caffe_c4.py
│   │   │   ├── rpn_r50_fpn.py
│   │   │   └── ssd300.py
│   │   └── schedules/
│   │       ├── schedule_1x.py
│   │       ├── schedule_20e.py
│   │       └── schedule_2x.py
│   ├── adamixer/
│   │   ├── README.md
│   │   ├── adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r101_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r50_1x_coco.py
│   │   ├── adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r50_mstrain_480-800_3x_coco.py
│   │   └── adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py
│   ├── albu_example/
│   │   ├── README.md
│   │   └── mask_rcnn_r50_fpn_albu_1x_coco.py
│   ├── atss/
│   │   ├── README.md
│   │   ├── atss_r101_fpn_1x_coco.py
│   │   └── atss_r50_fpn_1x_coco.py
│   ├── autoassign/
│   │   ├── README.md
│   │   └── autoassign_r50_fpn_8x2_1x_coco.py
│   ├── carafe/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_carafe_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_carafe_1x_coco.py
│   ├── cascade_rcnn/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r101_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r101_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_20e_coco.py
│   │   ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py
│   │   ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── cascade_rcnn_x101_64x4d_fpn_20e_coco.py
│   ├── cascade_rpn/
│   │   ├── README.md
│   │   ├── crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
│   │   └── crpn_r50_caffe_fpn_1x_coco.py
│   ├── centripetalnet/
│   │   ├── README.md
│   │   └── centripetalnet_hourglass104_mstest_16x6_210e_coco.py
│   ├── cityscapes/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_1x_cityscapes.py
│   │   └── mask_rcnn_r50_fpn_1x_cityscapes.py
│   ├── cornernet/
│   │   ├── README.md
│   │   ├── cornernet_hourglass104_mstest_10x5_210e_coco.py
│   │   ├── cornernet_hourglass104_mstest_32x3_210e_coco.py
│   │   └── cornernet_hourglass104_mstest_8x6_210e_coco.py
│   ├── dcn/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_dpool_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│   ├── deepfashion/
│   │   ├── README.md
│   │   └── mask_rcnn_r50_fpn_15e_deepfashion.py
│   ├── deformable_detr/
│   │   ├── README.md
│   │   ├── deformable_detr_r50_16x2_50e_coco.py
│   │   ├── deformable_detr_refine_r50_16x2_50e_coco.py
│   │   └── deformable_detr_twostage_refine_r50_16x2_50e_coco.py
│   ├── detectors/
│   │   ├── README.md
│   │   ├── cascade_rcnn_r50_rfp_1x_coco.py
│   │   ├── cascade_rcnn_r50_sac_1x_coco.py
│   │   ├── detectors_cascade_rcnn_r50_1x_coco.py
│   │   ├── detectors_htc_r50_1x_coco.py
│   │   ├── htc_r50_rfp_1x_coco.py
│   │   └── htc_r50_sac_1x_coco.py
│   ├── detr/
│   │   ├── README.md
│   │   └── detr_r50_8x2_150e_coco.py
│   ├── double_heads/
│   │   ├── README.md
│   │   └── dh_faster_rcnn_r50_fpn_1x_coco.py
│   ├── dynamic_rcnn/
│   │   ├── README.md
│   │   └── dynamic_rcnn_r50_fpn_1x_coco.py
│   ├── empirical_attention/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py
│   │   └── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
│   ├── fast_rcnn/
│   │   ├── README.md
│   │   ├── fast_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── fast_rcnn_r101_fpn_1x_coco.py
│   │   ├── fast_rcnn_r101_fpn_2x_coco.py
│   │   ├── fast_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── fast_rcnn_r50_fpn_1x_coco.py
│   │   └── fast_rcnn_r50_fpn_2x_coco.py
│   ├── faster_rcnn/
│   │   ├── README.md
│   │   ├── faster_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_2x_coco.py
│   │   ├── faster_rcnn_r50_caffe_c4_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py
│   │   ├── faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_2x_coco.py
│   │   ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_giou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_iou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_ohem_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py
│   │   ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── faster_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── fcos/
│   │   ├── README.md
│   │   ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py
│   │   ├── fcos_center_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r101_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│   │   ├── fcos_r50_torch_fpn_gn-head_4x4_1x_coco.py
│   │   └── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py
│   ├── foveabox/
│   │   ├── README.md
│   │   ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
│   │   ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
│   │   ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fovea_r101_fpn_4x4_1x_coco.py
│   │   ├── fovea_r101_fpn_4x4_2x_coco.py
│   │   ├── fovea_r50_fpn_4x4_1x_coco.py
│   │   └── fovea_r50_fpn_4x4_2x_coco.py
│   ├── fp16/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_fp16_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_fp16_1x_coco.py
│   │   └── retinanet_r50_fpn_fp16_1x_coco.py
│   ├── fpg/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│   │   ├── faster_rcnn_r50_fpg_crop640_50e_coco.py
│   │   ├── faster_rcnn_r50_fpn_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpg_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpn_crop640_50e_coco.py
│   │   ├── retinanet_r50_fpg-chn128_crop640_50e_coco.py
│   │   └── retinanet_r50_fpg_crop640_50e_coco.py
│   ├── free_anchor/
│   │   ├── README.md
│   │   ├── retinanet_free_anchor_r101_fpn_1x_coco.py
│   │   ├── retinanet_free_anchor_r50_fpn_1x_coco.py
│   │   └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py
│   ├── fsaf/
│   │   ├── README.md
│   │   ├── fsaf_r101_fpn_1x_coco.py
│   │   ├── fsaf_r50_fpn_1x_coco.py
│   │   └── fsaf_x101_64x4d_fpn_1x_coco.py
│   ├── gcnet/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   └── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   ├── gfl/
│   │   ├── README.md
│   │   ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
│   │   ├── gfl_r101_fpn_mstrain_2x_coco.py
│   │   ├── gfl_r50_fpn_1x_coco.py
│   │   ├── gfl_r50_fpn_mstrain_2x_coco.py
│   │   ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
│   │   └── gfl_x101_32x4d_fpn_mstrain_2x_coco.py
│   ├── ghm/
│   │   ├── README.md
│   │   ├── retinanet_ghm_r101_fpn_1x_coco.py
│   │   ├── retinanet_ghm_r50_fpn_1x_coco.py
│   │   ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py
│   │   └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py
│   ├── gn/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
│   │   └── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
│   ├── gn+ws/
│   │   ├── README.md
│   │   ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│   │   └── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
│   ├── grid_rcnn/
│   │   ├── README.md
│   │   ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py
│   │   ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py
│   │   └── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py
│   ├── groie/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_groie_1x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_groie_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│   ├── guided_anchoring/
│   │   ├── README.md
│   │   ├── ga_fast_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r50_fpn_1x_coco.py
│   │   ├── ga_faster_x101_32x4d_fpn_1x_coco.py
│   │   ├── ga_faster_x101_64x4d_fpn_1x_coco.py
│   │   ├── ga_retinanet_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py
│   │   ├── ga_retinanet_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_retinanet_r50_fpn_1x_coco.py
│   │   ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py
│   │   ├── ga_rpn_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_rpn_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_rpn_r50_fpn_1x_coco.py
│   │   ├── ga_rpn_x101_32x4d_fpn_1x_coco.py
│   │   └── ga_rpn_x101_64x4d_fpn_1x_coco.py
│   ├── hrnet/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
│   │   ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
│   │   ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w18_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w18_2x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w32_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w32_2x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w40_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w40_2x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── htc_hrnetv2p_w18_20e_coco.py
│   │   ├── htc_hrnetv2p_w32_20e_coco.py
│   │   ├── htc_hrnetv2p_w40_20e_coco.py
│   │   ├── htc_hrnetv2p_w40_28e_coco.py
│   │   ├── htc_x101_64x4d_fpn_16x1_28e_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w18_1x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w18_2x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w32_1x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w32_2x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w40_1x_coco.py
│   │   └── mask_rcnn_hrnetv2p_w40_2x_coco.py
│   ├── htc/
│   │   ├── README.md
│   │   ├── htc_r101_fpn_20e_coco.py
│   │   ├── htc_r50_fpn_1x_coco.py
│   │   ├── htc_r50_fpn_20e_coco.py
│   │   ├── htc_without_semantic_r50_fpn_1x_coco.py
│   │   ├── htc_x101_32x4d_fpn_16x1_20e_coco.py
│   │   ├── htc_x101_64x4d_fpn_16x1_20e_coco.py
│   │   └── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py
│   ├── instaboost/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│   │   ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py
│   │   ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py
│   │   └── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│   ├── ld/
│   │   ├── README.md
│   │   ├── ld_r101_gflv1_r101dcn_fpn_coco_2x.py
│   │   ├── ld_r18_gflv1_r101_fpn_coco_1x.py
│   │   ├── ld_r34_gflv1_r101_fpn_coco_1x.py
│   │   └── ld_r50_gflv1_r101_fpn_coco_1x.py
│   ├── legacy_1.x/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── faster_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── mask_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── retinanet_r50_caffe_fpn_1x_coco_v1.py
│   │   ├── retinanet_r50_fpn_1x_coco_v1.py
│   │   └── ssd300_coco_v1.py
│   ├── libra_rcnn/
│   │   ├── README.md
│   │   ├── libra_fast_rcnn_r50_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── libra_retinanet_r50_fpn_1x_coco.py
│   ├── lvis/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   ├── mask_rcnn/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_2x_coco.py
│   │   ├── mask_rcnn_r50_caffe_c4_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py
│   │   ├── mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_poly_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
│   │   ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── mask_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── ms_rcnn/
│   │   ├── README.md
│   │   ├── ms_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── ms_rcnn_r101_caffe_fpn_2x_coco.py
│   │   ├── ms_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── ms_rcnn_r50_caffe_fpn_2x_coco.py
│   │   ├── ms_rcnn_r50_fpn_1x_coco.py
│   │   ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── ms_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── nas_fcos/
│   │   ├── README.md
│   │   ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   │   └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   ├── nas_fpn/
│   │   ├── README.md
│   │   ├── retinanet_r50_fpn_crop640_50e_coco.py
│   │   └── retinanet_r50_nasfpn_crop640_50e_coco.py
│   ├── paa/
│   │   ├── README.md
│   │   ├── paa_r101_fpn_1x_coco.py
│   │   ├── paa_r101_fpn_2x_coco.py
│   │   ├── paa_r101_fpn_mstrain_3x_coco.py
│   │   ├── paa_r50_fpn_1.5x_coco.py
│   │   ├── paa_r50_fpn_1x_coco.py
│   │   ├── paa_r50_fpn_2x_coco.py
│   │   └── paa_r50_fpn_mstrain_3x_coco.py
│   ├── pafpn/
│   │   ├── README.md
│   │   └── faster_rcnn_r50_pafpn_1x_coco.py
│   ├── pascal_voc/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_1x_voc0712.py
│   │   ├── faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py
│   │   ├── retinanet_r50_fpn_1x_voc0712.py
│   │   ├── ssd300_voc0712.py
│   │   └── ssd512_voc0712.py
│   ├── pisa/
│   │   ├── README.md
│   │   ├── pisa_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_retinanet_r50_fpn_1x_coco.py
│   │   ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_ssd300_coco.py
│   │   └── pisa_ssd512_coco.py
│   ├── point_rend/
│   │   ├── README.md
│   │   ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py
│   │   └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py
│   ├── regnet/
│   │   ├── README.md
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│   │   ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│   │   ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py
│   │   ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py
│   │   ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py
│   │   └── retinanet_regnetx-800MF_fpn_1x_coco.py
│   ├── reppoints/
│   │   ├── README.md
│   │   ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
│   │   ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_r50_fpn_1x_coco.py
│   │   ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│   │   └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
│   ├── res2net/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r2_101_fpn_20e_coco.py
│   │   ├── faster_rcnn_r2_101_fpn_2x_coco.py
│   │   ├── htc_r2_101_fpn_20e_coco.py
│   │   └── mask_rcnn_r2_101_fpn_2x_coco.py
│   ├── resnest/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   ├── cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   ├── cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   └── mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   ├── retinanet/
│   │   ├── README.md
│   │   ├── retinanet_r101_caffe_fpn_1x_coco.py
│   │   ├── retinanet_r101_fpn_1x_coco.py
│   │   ├── retinanet_r101_fpn_2x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_1x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py
│   │   ├── retinanet_r50_fpn_1x_coco.py
│   │   ├── retinanet_r50_fpn_2x_coco.py
│   │   ├── retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── retinanet_x101_32x4d_fpn_2x_coco.py
│   │   ├── retinanet_x101_64x4d_fpn_1x_coco.py
│   │   └── retinanet_x101_64x4d_fpn_2x_coco.py
│   ├── rpn/
│   │   ├── README.md
│   │   ├── rpn_r101_caffe_fpn_1x_coco.py
│   │   ├── rpn_r101_fpn_1x_coco.py
│   │   ├── rpn_r101_fpn_2x_coco.py
│   │   ├── rpn_r50_caffe_c4_1x_coco.py
│   │   ├── rpn_r50_caffe_fpn_1x_coco.py
│   │   ├── rpn_r50_fpn_1x_coco.py
│   │   ├── rpn_r50_fpn_2x_coco.py
│   │   ├── rpn_x101_32x4d_fpn_1x_coco.py
│   │   ├── rpn_x101_32x4d_fpn_2x_coco.py
│   │   ├── rpn_x101_64x4d_fpn_1x_coco.py
│   │   └── rpn_x101_64x4d_fpn_2x_coco.py
│   ├── sabl/
│   │   ├── README.md
│   │   ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py
│   │   ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py
│   │   ├── sabl_faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── sabl_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
│   │   ├── sabl_retinanet_r50_fpn_1x_coco.py
│   │   └── sabl_retinanet_r50_fpn_gn_1x_coco.py
│   ├── scnet/
│   │   ├── README.md
│   │   ├── scnet_r101_fpn_20e_coco.py
│   │   ├── scnet_r50_fpn_1x_coco.py
│   │   ├── scnet_r50_fpn_20e_coco.py
│   │   ├── scnet_x101_64x4d_fpn_20e_coco.py
│   │   └── scnet_x101_64x4d_fpn_8x1_20e_coco.py
│   ├── scratch/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│   │   └── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│   ├── sparse_rcnn/
│   │   ├── README.md
│   │   ├── sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│   │   ├── sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
│   │   ├── sparse_rcnn_r50_fpn_1x_coco.py
│   │   ├── sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│   │   └── sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py
│   ├── ssd/
│   │   ├── README.md
│   │   ├── ssd300_coco.py
│   │   └── ssd512_coco.py
│   ├── tridentnet/
│   │   ├── README.md
│   │   ├── tridentnet_r50_caffe_1x_coco.py
│   │   ├── tridentnet_r50_caffe_mstrain_1x_coco.py
│   │   └── tridentnet_r50_caffe_mstrain_3x_coco.py
│   ├── vfnet/
│   │   ├── README.md
│   │   ├── vfnet_r101_fpn_1x_coco.py
│   │   ├── vfnet_r101_fpn_2x_coco.py
│   │   ├── vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r101_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r2_101_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_r50_fpn_1x_coco.py
│   │   ├── vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r50_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   └── vfnet_x101_64x4d_fpn_mstrain_2x_coco.py
│   ├── wider_face/
│   │   ├── README.md
│   │   └── ssd300_wider_face.py
│   ├── yolact/
│   │   ├── README.md
│   │   ├── yolact_r101_1x8_coco.py
│   │   ├── yolact_r50_1x8_coco.py
│   │   └── yolact_r50_8x8_coco.py
│   ├── yolo/
│   │   ├── README.md
│   │   ├── yolov3_d53_320_273e_coco.py
│   │   ├── yolov3_d53_mstrain-416_273e_coco.py
│   │   └── yolov3_d53_mstrain-608_273e_coco.py
│   └── yolof/
│       ├── README.md
│       ├── yolof_r50_c5_8x8_1x_coco.py
│       └── yolof_r50_c5_8x8_iter-1x_coco.py
├── demo/
│   ├── MMDet_Tutorial.ipynb
│   ├── create_result_gif.py
│   ├── image_demo.py
│   ├── inference_demo.ipynb
│   ├── video_demo.py
│   └── webcam_demo.py
├── demo.py
├── docker/
│   ├── Dockerfile
│   └── serve/
│       ├── Dockerfile
│       ├── config.properties
│       └── entrypoint.sh
├── docs/
│   ├── 1_exist_data_model.md
│   ├── 2_new_data_model.md
│   ├── 3_exist_data_new_model.md
│   ├── Makefile
│   ├── api.rst
│   ├── changelog.md
│   ├── compatibility.md
│   ├── conf.py
│   ├── conventions.md
│   ├── faq.md
│   ├── get_started.md
│   ├── index.rst
│   ├── make.bat
│   ├── model_zoo.md
│   ├── projects.md
│   ├── robustness_benchmarking.md
│   ├── stat.py
│   ├── tutorials/
│   │   ├── config.md
│   │   ├── customize_dataset.md
│   │   ├── customize_losses.md
│   │   ├── customize_models.md
│   │   ├── customize_runtime.md
│   │   ├── data_pipeline.md
│   │   ├── finetune.md
│   │   ├── index.rst
│   │   ├── onnx2tensorrt.md
│   │   └── pytorch2onnx.md
│   └── useful_tools.md
├── mmdet/
│   ├── __init__.py
│   ├── apis/
│   │   ├── __init__.py
│   │   ├── inference.py
│   │   ├── test.py
│   │   └── train.py
│   ├── core/
│   │   ├── __init__.py
│   │   ├── anchor/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_generator.py
│   │   │   ├── builder.py
│   │   │   ├── point_generator.py
│   │   │   └── utils.py
│   │   ├── bbox/
│   │   │   ├── __init__.py
│   │   │   ├── assigners/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── approx_max_iou_assigner.py
│   │   │   │   ├── assign_result.py
│   │   │   │   ├── atss_assigner.py
│   │   │   │   ├── base_assigner.py
│   │   │   │   ├── center_region_assigner.py
│   │   │   │   ├── grid_assigner.py
│   │   │   │   ├── hungarian_assigner.py
│   │   │   │   ├── max_iou_assigner.py
│   │   │   │   ├── point_assigner.py
│   │   │   │   ├── region_assigner.py
│   │   │   │   └── uniform_assigner.py
│   │   │   ├── builder.py
│   │   │   ├── coder/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_bbox_coder.py
│   │   │   │   ├── bucketing_bbox_coder.py
│   │   │   │   ├── delta_xywh_bbox_coder.py
│   │   │   │   ├── legacy_delta_xywh_bbox_coder.py
│   │   │   │   ├── pseudo_bbox_coder.py
│   │   │   │   ├── tblr_bbox_coder.py
│   │   │   │   └── yolo_bbox_coder.py
│   │   │   ├── demodata.py
│   │   │   ├── iou_calculators/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── iou2d_calculator.py
│   │   │   ├── match_costs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── match_cost.py
│   │   │   ├── samplers/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_sampler.py
│   │   │   │   ├── combined_sampler.py
│   │   │   │   ├── instance_balanced_pos_sampler.py
│   │   │   │   ├── iou_balanced_neg_sampler.py
│   │   │   │   ├── ohem_sampler.py
│   │   │   │   ├── pseudo_sampler.py
│   │   │   │   ├── random_sampler.py
│   │   │   │   ├── sampling_result.py
│   │   │   │   └── score_hlr_sampler.py
│   │   │   └── transforms.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_overlaps.py
│   │   │   ├── class_names.py
│   │   │   ├── eval_hooks.py
│   │   │   ├── mean_ap.py
│   │   │   └── recall.py
│   │   ├── export/
│   │   │   ├── __init__.py
│   │   │   ├── model_wrappers.py
│   │   │   ├── onnx_helper.py
│   │   │   └── pytorch2onnx.py
│   │   ├── mask/
│   │   │   ├── __init__.py
│   │   │   ├── mask_target.py
│   │   │   ├── structures.py
│   │   │   └── utils.py
│   │   ├── post_processing/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_nms.py
│   │   │   └── merge_augs.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── dist_utils.py
│   │   │   └── misc.py
│   │   └── visualization/
│   │       ├── __init__.py
│   │       └── image.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── api_wrappers/
│   │   │   ├── __init__.py
│   │   │   └── coco_api.py
│   │   ├── builder.py
│   │   ├── cityscapes.py
│   │   ├── coco.py
│   │   ├── custom.py
│   │   ├── dataset_wrappers.py
│   │   ├── deepfashion.py
│   │   ├── lvis.py
│   │   ├── pipelines/
│   │   │   ├── __init__.py
│   │   │   ├── auto_augment.py
│   │   │   ├── compose.py
│   │   │   ├── formating.py
│   │   │   ├── instaboost.py
│   │   │   ├── loading.py
│   │   │   ├── test_time_aug.py
│   │   │   └── transforms.py
│   │   ├── samplers/
│   │   │   ├── __init__.py
│   │   │   ├── distributed_sampler.py
│   │   │   └── group_sampler.py
│   │   ├── utils.py
│   │   ├── voc.py
│   │   ├── wider_face.py
│   │   └── xml_style.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── backbones/
│   │   │   ├── __init__.py
│   │   │   ├── darknet.py
│   │   │   ├── detectors_resnet.py
│   │   │   ├── detectors_resnext.py
│   │   │   ├── hourglass.py
│   │   │   ├── hrnet.py
│   │   │   ├── regnet.py
│   │   │   ├── res2net.py
│   │   │   ├── resnest.py
│   │   │   ├── resnet.py
│   │   │   ├── resnext.py
│   │   │   ├── ssd_vgg.py
│   │   │   ├── swin.py
│   │   │   └── trident_resnet.py
│   │   ├── builder.py
│   │   ├── dense_heads/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_free_head.py
│   │   │   ├── anchor_head.py
│   │   │   ├── atss_head.py
│   │   │   ├── autoassign_head.py
│   │   │   ├── base_dense_head.py
│   │   │   ├── cascade_rpn_head.py
│   │   │   ├── centripetal_head.py
│   │   │   ├── corner_head.py
│   │   │   ├── deformable_detr_head.py
│   │   │   ├── dense_test_mixins.py
│   │   │   ├── detr_head.py
│   │   │   ├── embedding_rpn_head.py
│   │   │   ├── fcos_head.py
│   │   │   ├── fovea_head.py
│   │   │   ├── free_anchor_retina_head.py
│   │   │   ├── fsaf_head.py
│   │   │   ├── ga_retina_head.py
│   │   │   ├── ga_rpn_head.py
│   │   │   ├── gfl_head.py
│   │   │   ├── guided_anchor_head.py
│   │   │   ├── ld_head.py
│   │   │   ├── nasfcos_head.py
│   │   │   ├── paa_head.py
│   │   │   ├── pisa_retinanet_head.py
│   │   │   ├── pisa_ssd_head.py
│   │   │   ├── query_generator.py
│   │   │   ├── reppoints_head.py
│   │   │   ├── retina_head.py
│   │   │   ├── retina_sepbn_head.py
│   │   │   ├── rpn_head.py
│   │   │   ├── rpn_test_mixin.py
│   │   │   ├── sabl_retina_head.py
│   │   │   ├── ssd_head.py
│   │   │   ├── vfnet_head.py
│   │   │   ├── yolact_head.py
│   │   │   ├── yolo_head.py
│   │   │   └── yolof_head.py
│   │   ├── detectors/
│   │   │   ├── __init__.py
│   │   │   ├── atss.py
│   │   │   ├── autoassign.py
│   │   │   ├── base.py
│   │   │   ├── cascade_rcnn.py
│   │   │   ├── cornernet.py
│   │   │   ├── deformable_detr.py
│   │   │   ├── detr.py
│   │   │   ├── fast_rcnn.py
│   │   │   ├── faster_rcnn.py
│   │   │   ├── fcos.py
│   │   │   ├── fovea.py
│   │   │   ├── fsaf.py
│   │   │   ├── gfl.py
│   │   │   ├── grid_rcnn.py
│   │   │   ├── htc.py
│   │   │   ├── kd_one_stage.py
│   │   │   ├── mask_rcnn.py
│   │   │   ├── mask_scoring_rcnn.py
│   │   │   ├── nasfcos.py
│   │   │   ├── paa.py
│   │   │   ├── point_rend.py
│   │   │   ├── query_based.py
│   │   │   ├── reppoints_detector.py
│   │   │   ├── retinanet.py
│   │   │   ├── rpn.py
│   │   │   ├── scnet.py
│   │   │   ├── single_stage.py
│   │   │   ├── sparse_rcnn.py
│   │   │   ├── trident_faster_rcnn.py
│   │   │   ├── two_stage.py
│   │   │   ├── vfnet.py
│   │   │   ├── yolact.py
│   │   │   ├── yolo.py
│   │   │   └── yolof.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy.py
│   │   │   ├── ae_loss.py
│   │   │   ├── balanced_l1_loss.py
│   │   │   ├── cross_entropy_loss.py
│   │   │   ├── focal_loss.py
│   │   │   ├── gaussian_focal_loss.py
│   │   │   ├── gfocal_loss.py
│   │   │   ├── ghm_loss.py
│   │   │   ├── iou_loss.py
│   │   │   ├── kd_loss.py
│   │   │   ├── mse_loss.py
│   │   │   ├── pisa_loss.py
│   │   │   ├── smooth_l1_loss.py
│   │   │   ├── utils.py
│   │   │   └── varifocal_loss.py
│   │   ├── necks/
│   │   │   ├── __init__.py
│   │   │   ├── bfp.py
│   │   │   ├── channel_mapper.py
│   │   │   ├── dilated_encoder.py
│   │   │   ├── fpg.py
│   │   │   ├── fpn.py
│   │   │   ├── fpn_carafe.py
│   │   │   ├── hrfpn.py
│   │   │   ├── identity_fpn.py
│   │   │   ├── nas_fpn.py
│   │   │   ├── nasfcos_fpn.py
│   │   │   ├── pafpn.py
│   │   │   ├── rfp.py
│   │   │   └── yolo_neck.py
│   │   ├── roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── adamixer_decoder.py
│   │   │   ├── base_roi_head.py
│   │   │   ├── bbox_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adamixer_decoder_stage.py
│   │   │   │   ├── adaptive_mixing_operator.py
│   │   │   │   ├── bbox_head.py
│   │   │   │   ├── convfc_bbox_head.py
│   │   │   │   ├── dii_head.py
│   │   │   │   ├── double_bbox_head.py
│   │   │   │   ├── sabl_head.py
│   │   │   │   ├── sampling_3d_operator.py
│   │   │   │   └── scnet_bbox_head.py
│   │   │   ├── cascade_roi_head.py
│   │   │   ├── double_roi_head.py
│   │   │   ├── dynamic_roi_head.py
│   │   │   ├── grid_roi_head.py
│   │   │   ├── htc_roi_head.py
│   │   │   ├── mask_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── coarse_mask_head.py
│   │   │   │   ├── fcn_mask_head.py
│   │   │   │   ├── feature_relay_head.py
│   │   │   │   ├── fused_semantic_head.py
│   │   │   │   ├── global_context_head.py
│   │   │   │   ├── grid_head.py
│   │   │   │   ├── htc_mask_head.py
│   │   │   │   ├── mask_point_head.py
│   │   │   │   ├── maskiou_head.py
│   │   │   │   ├── scnet_mask_head.py
│   │   │   │   └── scnet_semantic_head.py
│   │   │   ├── mask_scoring_roi_head.py
│   │   │   ├── pisa_roi_head.py
│   │   │   ├── point_rend_roi_head.py
│   │   │   ├── roi_extractors/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_roi_extractor.py
│   │   │   │   ├── generic_roi_extractor.py
│   │   │   │   └── single_level_roi_extractor.py
│   │   │   ├── scnet_roi_head.py
│   │   │   ├── shared_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   └── res_layer.py
│   │   │   ├── sparse_roi_head.py
│   │   │   ├── standard_roi_head.py
│   │   │   ├── test_mixins.py
│   │   │   └── trident_roi_head.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── builder.py
│   │       ├── gaussian_target.py
│   │       ├── positional_encoding.py
│   │       ├── res_layer.py
│   │       └── transformer.py
│   ├── temp.txt
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── collect_env.py
│   │   ├── contextmanagers.py
│   │   ├── logger.py
│   │   ├── profiling.py
│   │   ├── util_mixins.py
│   │   └── util_random.py
│   └── version.py
├── pytest.ini
├── requirements/
│   ├── build.txt
│   ├── docs.txt
│   ├── optional.txt
│   ├── readthedocs.txt
│   ├── runtime.txt
│   └── tests.txt
├── requirements.txt
├── setup.cfg
├── setup.py
├── test_module.py
├── tests/
│   ├── test_data/
│   │   ├── test_datasets/
│   │   │   ├── test_coco_dataset.py
│   │   │   ├── test_common.py
│   │   │   ├── test_custom_dataset.py
│   │   │   ├── test_dataset_wrapper.py
│   │   │   └── test_xml_dataset.py
│   │   ├── test_pipelines/
│   │   │   ├── test_formatting.py
│   │   │   ├── test_loading.py
│   │   │   ├── test_sampler.py
│   │   │   └── test_transform/
│   │   │       ├── test_img_augment.py
│   │   │       ├── test_models_aug_test.py
│   │   │       ├── test_rotate.py
│   │   │       ├── test_shear.py
│   │   │       ├── test_transform.py
│   │   │       └── test_translate.py
│   │   └── test_utils.py
│   ├── test_metrics/
│   │   ├── test_box_overlap.py
│   │   └── test_losses.py
│   ├── test_models/
│   │   ├── test_backbones/
│   │   │   ├── __init__.py
│   │   │   ├── test_hourglass.py
│   │   │   ├── test_regnet.py
│   │   │   ├── test_renext.py
│   │   │   ├── test_res2net.py
│   │   │   ├── test_resnest.py
│   │   │   ├── test_resnet.py
│   │   │   ├── test_trident_resnet.py
│   │   │   └── utils.py
│   │   ├── test_dense_heads/
│   │   │   ├── test_anchor_head.py
│   │   │   ├── test_atss_head.py
│   │   │   ├── test_autoassign_head.py
│   │   │   ├── test_corner_head.py
│   │   │   ├── test_detr_head.py
│   │   │   ├── test_fcos_head.py
│   │   │   ├── test_fsaf_head.py
│   │   │   ├── test_ga_anchor_head.py
│   │   │   ├── test_gfl_head.py
│   │   │   ├── test_ld_head.py
│   │   │   ├── test_paa_head.py
│   │   │   ├── test_pisa_head.py
│   │   │   ├── test_sabl_retina_head.py
│   │   │   ├── test_vfnet_head.py
│   │   │   ├── test_yolact_head.py
│   │   │   └── test_yolof_head.py
│   │   ├── test_forward.py
│   │   ├── test_loss.py
│   │   ├── test_necks.py
│   │   ├── test_roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── test_bbox_head.py
│   │   │   ├── test_mask_head.py
│   │   │   ├── test_roi_extractor.py
│   │   │   ├── test_sabl_bbox_head.py
│   │   │   └── utils.py
│   │   └── test_utils/
│   │       ├── test_position_encoding.py
│   │       └── test_transformer.py
│   ├── test_onnx/
│   │   ├── __init__.py
│   │   ├── test_head.py
│   │   ├── test_neck.py
│   │   └── utils.py
│   ├── test_runtime/
│   │   ├── async_benchmark.py
│   │   ├── test_async.py
│   │   ├── test_config.py
│   │   ├── test_eval_hook.py
│   │   └── test_fp16.py
│   └── test_utils/
│       ├── test_anchor.py
│       ├── test_assigner.py
│       ├── test_coder.py
│       ├── test_masks.py
│       ├── test_misc.py
│       ├── test_version.py
│       └── test_visualization.py
├── tools/
│   ├── analysis_tools/
│   │   ├── analyze_logs.py
│   │   ├── analyze_results.py
│   │   ├── benchmark.py
│   │   ├── coco_error_analysis.py
│   │   ├── eval_metric.py
│   │   ├── get_flops.py
│   │   ├── robustness_eval.py
│   │   └── test_robustness.py
│   ├── dataset_converters/
│   │   ├── cityscapes.py
│   │   └── pascal_voc.py
│   ├── deployment/
│   │   ├── mmdet2torchserve.py
│   │   ├── mmdet_handler.py
│   │   ├── onnx2tensorrt.py
│   │   ├── pytorch2onnx.py
│   │   └── test.py
│   ├── dist_test.sh
│   ├── dist_train.sh
│   ├── misc/
│   │   ├── browse_dataset.py
│   │   └── print_config.py
│   ├── model_converters/
│   │   ├── detectron2pytorch.py
│   │   ├── publish_model.py
│   │   ├── regnet2mmdet.py
│   │   └── upgrade_model_version.py
│   ├── slurm_test.sh
│   ├── slurm_train.sh
│   ├── test.py
│   └── train.py
├── visualizations/
│   └── README.md
└── visualize_sampling_points.ipynb

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
**/.DS_Store
.vscode


================================================
FILE: LICENSE
================================================
MIT License

Copyright (c) 2022 Multimedia Computing Group, Nanjing University

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


================================================
FILE: Makefile
================================================
# Launch distributed training for AdaMixer w/ ResNet-50, 1x schedule (100 queries),
# on 8 GPUs via tools/dist_train.sh. PORT sets the rendezvous port for torch.distributed.
adamixer-r50:
	CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29502 ./tools/dist_train.sh \
	configs/adamixer/adamixer_r50_1x_coco.py \
	8

# AdaMixer w/ ResNet-50, 3x schedule, 300 queries, random crop + multi-scale training.
adamixer-r50-3x:
	CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29501 ./tools/dist_train.sh \
	configs/adamixer/adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py \
	8

# AdaMixer w/ ResNet-101, 3x schedule, 300 queries, random crop + multi-scale training.
adamixer-r101-3x:
	CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29501 ./tools/dist_train.sh \
	configs/adamixer/adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py \
	8

# AdaMixer w/ ResNeXt-101-DCN, 3x schedule, 300 queries, random crop + multi-scale training.
adamixer-dx101-3x:
	CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29501 ./tools/dist_train.sh \
	configs/adamixer/adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py \
	8

# AdaMixer w/ Swin-S backbone, 3x schedule, 300 queries, random crop + multi-scale training.
adamixer-swin_s-3x:
	CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29501 ./tools/dist_train.sh \
	configs/adamixer/adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py \
	8


================================================
FILE: README.md
================================================
# AdaMixer: A Fast-Converging Query-Based Object Detector [arxiv](https://arxiv.org/abs/2203.16507)

> [**AdaMixer: A Fast-Converging Query-Based Object Detector**](https://arxiv.org/abs/2203.16507)<br>
> _accepted to CVPR 2022 as an oral presentation_ <br>
> [Ziteng Gao](https://sebgao.github.io), [Limin Wang](http://wanglimin.github.io/), Bing Han, Sheng Guo<br>Nanjing University, MYbank Ant Group

[[slides]](adamixer_cvpr_2022_keynote.pdf)
[[arxiv]](https://arxiv.org/abs/2203.16507)

## 📰 News
[2022.7.3] Reproduced model checkpoints and logs are available.

[2022.4.4] The code is available now.

[2022.3.31] Code will be released in a few days (not too long). Pre-trained models will take some time to grant the permission of Ant Group to be available online. Please stay tuned or *watch this repo* for quick information.

## ✨ Highlights
### 🆕 MLP-Mixer for Object Detection
To the best of our knowledge, we are the first to introduce the MLP-Mixer for object detection. The MLP-Mixer is used in the DETR-like decoder in an adaptive and query-wise manner to enrich the adaptability to varying objects across images.

### ⚡️ Fast Converging DETR-like Architecture
AdaMixer enjoys a fast convergence speed and reaches up to 45.0 AP on COCO val within 12 epochs with only the architectural design improvement. Our method is compatible with other training improvements, like [multiple predictions from a query](https://github.com/megvii-research/AnchorDETR) and [denoising training](https://github.com/FengLi-ust/DN-DETR), which are expected to improve AdaMixer further (we have not tried yet).

### 🧱 Simple Architecture, NO extra attentional encoders or FPNs required
Our AdaMixer does not hunger for extra attention encoders or explicit feature pyramid networks. Instead, we improve the query decoder in DETR-like detectors to keep the architecture as simple, efficient, and strong as possible.





## ➡️ Guide to Our Code
Our code structure follows the MMDetection framework. To get started, please refer to mmdetection doc [get_started.md](docs/get_started.md) for installation.

Our AdaMixer config file lies in [configs/adamixer](configs/adamixer) folder. You can start training our detectors with make targets in [Makefile](Makefile).

The code of an AdaMixer decoder stage is in
[mmdet/models/roi_heads/bbox_heads/adamixer_decoder_stage.py](mmdet/models/roi_heads/bbox_heads/adamixer_decoder_stage.py).
The code of the 3D feature space sampling is in [mmdet/models/roi_heads/bbox_heads/sampling_3d_operator.py](mmdet/models/roi_heads/bbox_heads/sampling_3d_operator.py).
The code of the adaptive mixing process is in [mmdet/models/roi_heads/bbox_heads/adaptive_mixing_operator.py](mmdet/models/roi_heads/bbox_heads/adaptive_mixing_operator.py).


__NOTE:__
1. Please use `mmcv_full==1.3.3` and `pytorch>=1.5.0` for correct reproduction ([#4](/../../issues/4), [#12](/../../issues/12)). ~~Please make sure `init_weight` methods in `AdaptiveSamplingMixing` and `AdaptiveMixing` are called for correct initializations *AND* the initialized weights are not overridden by other methods (some MMCV versions may incur repeated initializations).~~
2. We notice ~0.3 AP noise for AdaMixer w/ R50 under the 1x training setting (42.7 AP reported in the paper).

## 🧪 Main Results
Checkpoints and logs are available at [google drive](https://drive.google.com/drive/folders/1VPP-wJV6BzI_8MVy33RQZkV8ONgg5uLQ?usp=sharing).

| config |  detector | backbone  | APval | APtest  | APval (reprod.) | ckpt (reprod.) | log (reprod.) |
| :---: | :-------: | :------:  | :---: | :----:  | :---: | :---: | :---: | 
| [config](configs/adamixer/adamixer_r50_1x_coco.py) | AdaMixer (1x schedule, 100 queries)   |  R50     |  42.7  |   |  42.6 |[ckpt](https://drive.google.com/file/d/1v3rDczN2VXSgRYmSX0-XRy8racgpfed2/view?usp=sharing) | [log](https://drive.google.com/file/d/1nSjufpRiRYUn_gfJiG2RAv_MTWC2NiNr/view?usp=sharing) |
| [config](configs/adamixer/adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py) | AdaMixer (3x schedule, 300 queries)  |  R50     |  47.0  | 47.2   |  46.8 |[ckpt](https://drive.google.com/file/d/1Qj3sbcFF1zV8oeSfAWzZnYqv8mXWN8wJ/view?usp=sharing) | [log](https://drive.google.com/file/d/1EUAYtK-owJj1vlFG-NGaezw2uNWTjvzv/view?usp=sharing) |
| [config](configs/adamixer/adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py) | AdaMixer (3x schedule, 300 queries)  |  R101    |  48.0  | 48.1   |  48.1 | [ckpt](https://drive.google.com/file/d/1dx1-FZ20VDX7nCHtXh2_5AMrSysKEGK8/view?usp=sharing) | [log](https://drive.google.com/file/d/1MIocHsn9PHxWh5f1uua1OsEX_zbqiyX0/view?usp=sharing)
| [config](configs/adamixer/adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py) | AdaMixer (3x schedule, 300 queries)  |  X101-DCN|  49.5  | 49.3   |  49.7 | [ckpt](https://drive.google.com/file/d/1vbIYuq8hvebP-DkqyFCMFVh5CSBjZ8cA/view?usp=sharing) | [log](https://drive.google.com/file/d/1nztwEaVSvNaM5os9NsecV97jilgHIuoO/view?usp=sharing)
| [config](configs/adamixer/adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py) | AdaMixer (3x schedule, 300 queries)   |  Swin-S  |  51.3  | 51.3   | on the way |

Special thanks to [Zhan Tong](https://github.com/yztongzhan) for these reproduced models.

## ✏️ Citation
If you find AdaMixer useful in your research, please cite us using the following entry:
```
@inproceedings{adamixer22cvpr,
  author    = {Ziteng Gao and
               Limin Wang and
               Bing Han and
               Sheng Guo},
  title     = {AdaMixer: A Fast-Converging Query-Based Object Detector},
  booktitle = {{CVPR}},
  year      = {2022}
}
```


## 👍 Acknowledgement
Thanks to [Zhan Tong](https://github.com/yztongzhan) and Zihua Xiong for their help.
























## Original MMDetection README.md
_The following begins the original mmdetection README.md file_
<div align="center">
  <img src="resources/mmdet-logo.png" width="600"/>
</div>

**News**: We released the technical report on [ArXiv](https://arxiv.org/abs/1906.07155).

Documentation: https://mmdetection.readthedocs.io/

## Introduction

English | [简体中文](README_zh-CN.md)

MMDetection is an open source object detection toolbox based on PyTorch. It is
a part of the [OpenMMLab](https://openmmlab.com/) project.

The master branch works with **PyTorch 1.3+**.
The old v1.x branch works with PyTorch 1.1 to 1.4, but v2.0 is strongly recommended for faster speed, higher performance, better design and more friendly usage.

![demo image](resources/coco_test_12510.jpg)

### Major features

- **Modular Design**

  We decompose the detection framework into different components and one can easily construct a customized object detection framework by combining different modules.

- **Support of multiple frameworks out of box**

  The toolbox directly supports popular and contemporary detection frameworks, *e.g.* Faster RCNN, Mask RCNN, RetinaNet, etc.

- **High efficiency**

  All basic bbox and mask operations run on GPUs. The training speed is faster than or comparable to other codebases, including [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) and [SimpleDet](https://github.com/TuSimple/simpledet).

- **State of the art**

  The toolbox stems from the codebase developed by the *MMDet* team, who won [COCO Detection Challenge](http://cocodataset.org/#detection-leaderboard) in 2018, and we keep pushing it forward.

Apart from MMDetection, we also released a library [mmcv](https://github.com/open-mmlab/mmcv) for computer vision research, which is heavily depended on by this toolbox.

## License

The mmdetection project is released under the [Apache 2.0 license](https://github.com/open-mmlab/mmdetection/blob/master/LICENSE).

## Changelog

v2.12.0 was released in 01/05/2021.
Please refer to [changelog.md](docs/changelog.md) for details and release history.
A comparison between v1.x and v2.0 codebases can be found in [compatibility.md](docs/compatibility.md).

## Benchmark and model zoo

Results and models are available in the [model zoo](docs/model_zoo.md).

Supported backbones:

- [x] ResNet (CVPR'2016)
- [x] ResNeXt (CVPR'2017)
- [x] VGG (ICLR'2015)
- [x] HRNet (CVPR'2019)
- [x] RegNet (CVPR'2020)
- [x] Res2Net (TPAMI'2020)
- [x] ResNeSt (ArXiv'2020)

Supported methods:

- [x] [RPN (NeurIPS'2015)](configs/rpn)
- [x] [Fast R-CNN (ICCV'2015)](configs/fast_rcnn)
- [x] [Faster R-CNN (NeurIPS'2015)](configs/faster_rcnn)
- [x] [Mask R-CNN (ICCV'2017)](configs/mask_rcnn)
- [x] [Cascade R-CNN (CVPR'2018)](configs/cascade_rcnn)
- [x] [Cascade Mask R-CNN (CVPR'2018)](configs/cascade_rcnn)
- [x] [SSD (ECCV'2016)](configs/ssd)
- [x] [RetinaNet (ICCV'2017)](configs/retinanet)
- [x] [GHM (AAAI'2019)](configs/ghm)
- [x] [Mask Scoring R-CNN (CVPR'2019)](configs/ms_rcnn)
- [x] [Double-Head R-CNN (CVPR'2020)](configs/double_heads)
- [x] [Hybrid Task Cascade (CVPR'2019)](configs/htc)
- [x] [Libra R-CNN (CVPR'2019)](configs/libra_rcnn)
- [x] [Guided Anchoring (CVPR'2019)](configs/guided_anchoring)
- [x] [FCOS (ICCV'2019)](configs/fcos)
- [x] [RepPoints (ICCV'2019)](configs/reppoints)
- [x] [Foveabox (TIP'2020)](configs/foveabox)
- [x] [FreeAnchor (NeurIPS'2019)](configs/free_anchor)
- [x] [NAS-FPN (CVPR'2019)](configs/nas_fpn)
- [x] [ATSS (CVPR'2020)](configs/atss)
- [x] [FSAF (CVPR'2019)](configs/fsaf)
- [x] [PAFPN (CVPR'2018)](configs/pafpn)
- [x] [Dynamic R-CNN (ECCV'2020)](configs/dynamic_rcnn)
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CARAFE (ICCV'2019)](configs/carafe/README.md)
- [x] [DCNv2 (CVPR'2019)](configs/dcn/README.md)
- [x] [Group Normalization (ECCV'2018)](configs/gn/README.md)
- [x] [Weight Standardization (ArXiv'2019)](configs/gn+ws/README.md)
- [x] [OHEM (CVPR'2016)](configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py)
- [x] [Soft-NMS (ICCV'2017)](configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py)
- [x] [Generalized Attention (ICCV'2019)](configs/empirical_attention/README.md)
- [x] [GCNet (ICCVW'2019)](configs/gcnet/README.md)
- [x] [Mixed Precision (FP16) Training (ArXiv'2017)](configs/fp16/README.md)
- [x] [InstaBoost (ICCV'2019)](configs/instaboost/README.md)
- [x] [GRoIE (ICPR'2020)](configs/groie/README.md)
- [x] [DetectoRS (ArXiv'2020)](configs/detectors/README.md)
- [x] [Generalized Focal Loss (NeurIPS'2020)](configs/gfl/README.md)
- [x] [CornerNet (ECCV'2018)](configs/cornernet/README.md)
- [x] [Side-Aware Boundary Localization (ECCV'2020)](configs/sabl/README.md)
- [x] [YOLOv3 (ArXiv'2018)](configs/yolo/README.md)
- [x] [PAA (ECCV'2020)](configs/paa/README.md)
- [x] [YOLACT (ICCV'2019)](configs/yolact/README.md)
- [x] [CentripetalNet (CVPR'2020)](configs/centripetalnet/README.md)
- [x] [VFNet (ArXiv'2020)](configs/vfnet/README.md)
- [x] [DETR (ECCV'2020)](configs/detr/README.md)
- [x] [Deformable DETR (ICLR'2021)](configs/deformable_detr/README.md)
- [x] [CascadeRPN (NeurIPS'2019)](configs/cascade_rpn/README.md)
- [x] [SCNet (AAAI'2021)](configs/scnet/README.md)
- [x] [AutoAssign (ArXiv'2020)](configs/autoassign/README.md)
- [x] [YOLOF (CVPR'2021)](configs/yolof/README.md)


Some other methods are also supported in [projects using MMDetection](./docs/projects.md).

## Installation

Please refer to [get_started.md](docs/get_started.md) for installation.

## Getting Started

Please see [get_started.md](docs/get_started.md) for the basic usage of MMDetection.
We provide [colab tutorial](demo/MMDet_Tutorial.ipynb), and full guidance for quick run [with existing dataset](docs/1_exist_data_model.md) and [with new dataset](docs/2_new_data_model.md) for beginners.
There are also tutorials for [finetuning models](docs/tutorials/finetune.md), [adding new dataset](docs/tutorials/new_dataset.md), [designing data pipeline](docs/tutorials/data_pipeline.md), [customizing models](docs/tutorials/customize_models.md), [customizing runtime settings](docs/tutorials/customize_runtime.md) and [useful tools](docs/useful_tools.md).

Please refer to [FAQ](docs/faq.md) for frequently asked questions.

## Contributing

We appreciate all contributions to improve MMDetection. Please refer to [CONTRIBUTING.md](.github/CONTRIBUTING.md) for the contributing guideline.

## Acknowledgement

MMDetection is an open source project that is contributed by researchers and engineers from various colleges and companies. We appreciate all the contributors who implement their methods or add new features, as well as users who give valuable feedbacks.
We wish that the toolbox and benchmark could serve the growing research community by providing a flexible toolkit to reimplement existing methods and develop their own new detectors.

## Citation

If you use this toolbox or benchmark in your research, please cite this project.

```
@article{mmdetection,
  title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
  author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
             Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
             Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
             Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
             Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
             and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
  journal= {arXiv preprint arXiv:1906.07155},
  year={2019}
}
```

## Projects in OpenMMLab

- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab foundational library for computer vision.
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab image classification toolbox and benchmark.
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab detection toolbox and benchmark.
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection.
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab semantic segmentation toolbox and benchmark.
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab's next-generation action understanding toolbox and benchmark.
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab video perception toolbox and benchmark.
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab pose estimation toolbox and benchmark.
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab image and video editing toolbox.
- [MMOCR](https://github.com/open-mmlab/mmocr): A Comprehensive Toolbox for Text Detection, Recognition and Understanding.
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab image and video generative models toolbox.


================================================
FILE: README_zh-CN.md
================================================
<div align="center">
  <img src="resources/mmdet-logo.png" width="600"/>
</div>

**新闻**: 我们在 [ArXiv](https://arxiv.org/abs/1906.07155) 上公开了技术报告。

文档: https://mmdetection.readthedocs.io/

## 简介

[English](README.md) | 简体中文

MMDetection 是一个基于 PyTorch 的目标检测开源工具箱。它是 [OpenMMLab](https://openmmlab.com/) 项目的一部分。

主分支代码目前支持 PyTorch 1.3 以上的版本。

v1.x 的历史版本支持 PyTorch 1.1 到 1.4,但是我们强烈建议用户使用新的 2.x 的版本,新的版本速度更快,性能更高,有更优雅的代码设计,对用户使用也更加友好。

![demo image](resources/coco_test_12510.jpg)

### 主要特性

- **模块化设计**

  MMDetection 将检测框架解耦成不同的模块组件,通过组合不同的模块组件,用户可以便捷地构建自定义的检测模型

- **丰富的即插即用的算法和模型**

  MMDetection 支持了众多主流的和最新的检测算法,例如 Faster R-CNN,Mask R-CNN,RetinaNet 等。

- **速度快**

  基本的框和 mask 操作都实现了 GPU 版本,训练速度比其他代码库更快或者相当,包括 [Detectron2](https://github.com/facebookresearch/detectron2), [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark) 和 [SimpleDet](https://github.com/TuSimple/simpledet)。

- **性能高**

  MMDetection 这个算法库源自于 COCO 2018 目标检测竞赛的冠军团队 *MMDet* 团队开发的代码,我们在之后持续进行了改进和提升。

除了 MMDetection 之外,我们还开源了计算机视觉基础库 [MMCV](https://github.com/open-mmlab/mmcv),MMCV 是 MMDetection 的主要依赖。

## 开源许可证

该项目采用 [Apache 2.0 开源许可证](LICENSE)。

## 更新日志

最新的月度版本 v2.12.0 在 2021.05.01 发布。
如果想了解更多版本更新细节和历史信息,请阅读[更新日志](docs/changelog.md)。
在[兼容性说明文档](docs/compatibility.md)中我们提供了 1.x 和 2.0 版本的详细比较。

## 基准测试和模型库

测试结果和模型可以在[模型库](docs/model_zoo.md)中找到。

已支持的骨干网络:

- [x] ResNet (CVPR'2016)
- [x] ResNeXt (CVPR'2017)
- [x] VGG (ICLR'2015)
- [x] HRNet (CVPR'2019)
- [x] RegNet (CVPR'2020)
- [x] Res2Net (TPAMI'2020)
- [x] ResNeSt (ArXiv'2020)

已支持的算法:

- [x] [RPN (NeurIPS'2015)](configs/rpn)
- [x] [Fast R-CNN (ICCV'2015)](configs/fast_rcnn)
- [x] [Faster R-CNN (NeurIPS'2015)](configs/faster_rcnn)
- [x] [Mask R-CNN (ICCV'2017)](configs/mask_rcnn)
- [x] [Cascade R-CNN (CVPR'2018)](configs/cascade_rcnn)
- [x] [Cascade Mask R-CNN (CVPR'2018)](configs/cascade_rcnn)
- [x] [SSD (ECCV'2016)](configs/ssd)
- [x] [RetinaNet (ICCV'2017)](configs/retinanet)
- [x] [GHM (AAAI'2019)](configs/ghm)
- [x] [Mask Scoring R-CNN (CVPR'2019)](configs/ms_rcnn)
- [x] [Double-Head R-CNN (CVPR'2020)](configs/double_heads)
- [x] [Hybrid Task Cascade (CVPR'2019)](configs/htc)
- [x] [Libra R-CNN (CVPR'2019)](configs/libra_rcnn)
- [x] [Guided Anchoring (CVPR'2019)](configs/guided_anchoring)
- [x] [FCOS (ICCV'2019)](configs/fcos)
- [x] [RepPoints (ICCV'2019)](configs/reppoints)
- [x] [Foveabox (TIP'2020)](configs/foveabox)
- [x] [FreeAnchor (NeurIPS'2019)](configs/free_anchor)
- [x] [NAS-FPN (CVPR'2019)](configs/nas_fpn)
- [x] [ATSS (CVPR'2020)](configs/atss)
- [x] [FSAF (CVPR'2019)](configs/fsaf)
- [x] [PAFPN (CVPR'2018)](configs/pafpn)
- [x] [Dynamic R-CNN (ECCV'2020)](configs/dynamic_rcnn)
- [x] [PointRend (CVPR'2020)](configs/point_rend)
- [x] [CARAFE (ICCV'2019)](configs/carafe/README.md)
- [x] [DCNv2 (CVPR'2019)](configs/dcn/README.md)
- [x] [Group Normalization (ECCV'2018)](configs/gn/README.md)
- [x] [Weight Standardization (ArXiv'2019)](configs/gn+ws/README.md)
- [x] [OHEM (CVPR'2016)](configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py)
- [x] [Soft-NMS (ICCV'2017)](configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py)
- [x] [Generalized Attention (ICCV'2019)](configs/empirical_attention/README.md)
- [x] [GCNet (ICCVW'2019)](configs/gcnet/README.md)
- [x] [Mixed Precision (FP16) Training (ArXiv'2017)](configs/fp16/README.md)
- [x] [InstaBoost (ICCV'2019)](configs/instaboost/README.md)
- [x] [GRoIE (ICPR'2020)](configs/groie/README.md)
- [x] [DetectoRS (ArXiv'2020)](configs/detectors/README.md)
- [x] [Generalized Focal Loss (NeurIPS'2020)](configs/gfl/README.md)
- [x] [CornerNet (ECCV'2018)](configs/cornernet/README.md)
- [x] [Side-Aware Boundary Localization (ECCV'2020)](configs/sabl/README.md)
- [x] [YOLOv3 (ArXiv'2018)](configs/yolo/README.md)
- [x] [PAA (ECCV'2020)](configs/paa/README.md)
- [x] [YOLACT (ICCV'2019)](configs/yolact/README.md)
- [x] [CentripetalNet (CVPR'2020)](configs/centripetalnet/README.md)
- [x] [VFNet (ArXiv'2020)](configs/vfnet/README.md)
- [x] [DETR (ECCV'2020)](configs/detr/README.md)
- [x] [Deformable DETR (ICLR'2021)](configs/deformable_detr/README.md)
- [x] [CascadeRPN (NeurIPS'2019)](configs/cascade_rpn/README.md)
- [x] [SCNet (AAAI'2021)](configs/scnet/README.md)
- [x] [AutoAssign (ArXiv'2020)](configs/autoassign/README.md)
- [x] [YOLOF (CVPR'2021)](configs/yolof/README.md)

我们在[基于 MMDetection 的项目](./docs/projects.md)中列举了一些其他的支持的算法。

## 安装

请参考[快速入门文档](docs/get_started.md)进行安装。

## 快速入门

请参考[快速入门文档](docs/get_started.md)学习 MMDetection 的基本使用。
我们提供了 [colab 教程](demo/MMDet_Tutorial.ipynb),也为新手提供了完整的运行教程,分别针对[已有数据集](docs/1_exist_data_model.md)和[新数据集](docs/2_new_data_model.md) 完整的使用指南

我们也提供了一些进阶教程,内容覆盖了 [finetune 模型](docs/tutorials/finetune.md),[增加新数据集支持](docs/tutorials/new_dataset.md),[设计新的数据预处理流程](docs/tutorials/data_pipeline.md),[增加自定义模型](docs/tutorials/customize_models.md),[增加自定义的运行时配置](docs/tutorials/customize_runtime.md),[常用工具和脚本](docs/useful_tools.md)。

如果遇到问题,请参考 [FAQ 页面](docs/faq.md)。

## 贡献指南

我们感谢所有的贡献者为改进和提升 MMDetection 所作出的努力。请参考[贡献指南](.github/CONTRIBUTING.md)来了解参与项目贡献的相关指引。

## 致谢

MMDetection 是一款由来自不同高校和企业的研发人员共同参与贡献的开源项目。我们感谢所有为项目提供算法复现和新功能支持的贡献者,以及提供宝贵反馈的用户。 我们希望这个工具箱和基准测试可以为社区提供灵活的代码工具,供用户复现已有算法并开发自己的新模型,从而不断为开源社区提供贡献。

## 引用

如果你在研究中使用了本项目的代码或者性能基准,请参考如下 bibtex 引用 MMDetection。

```
@article{mmdetection,
  title   = {{MMDetection}: Open MMLab Detection Toolbox and Benchmark},
  author  = {Chen, Kai and Wang, Jiaqi and Pang, Jiangmiao and Cao, Yuhang and
             Xiong, Yu and Li, Xiaoxiao and Sun, Shuyang and Feng, Wansen and
             Liu, Ziwei and Xu, Jiarui and Zhang, Zheng and Cheng, Dazhi and
             Zhu, Chenchen and Cheng, Tianheng and Zhao, Qijie and Li, Buyu and
             Lu, Xin and Zhu, Rui and Wu, Yue and Dai, Jifeng and Wang, Jingdong
             and Shi, Jianping and Ouyang, Wanli and Loy, Chen Change and Lin, Dahua},
  journal= {arXiv preprint arXiv:1906.07155},
  year={2019}
}
```

## OpenMMLab 的其他项目

- [MMCV](https://github.com/open-mmlab/mmcv): OpenMMLab 计算机视觉基础库
- [MMClassification](https://github.com/open-mmlab/mmclassification): OpenMMLab 图像分类工具箱
- [MMDetection](https://github.com/open-mmlab/mmdetection): OpenMMLab 目标检测工具箱
- [MMDetection3D](https://github.com/open-mmlab/mmdetection3d): OpenMMLab 新一代通用 3D 目标检测平台
- [MMSegmentation](https://github.com/open-mmlab/mmsegmentation): OpenMMLab 语义分割工具箱
- [MMAction2](https://github.com/open-mmlab/mmaction2): OpenMMLab 新一代视频理解工具箱
- [MMTracking](https://github.com/open-mmlab/mmtracking): OpenMMLab 一体化视频目标感知平台
- [MMPose](https://github.com/open-mmlab/mmpose): OpenMMLab 姿态估计工具箱
- [MMEditing](https://github.com/open-mmlab/mmediting): OpenMMLab 图像视频编辑工具箱
- [MMOCR](https://github.com/open-mmlab/mmocr): OpenMMLab 全流程文字检测识别理解工具包
- [MMGeneration](https://github.com/open-mmlab/mmgeneration): OpenMMLab 图片视频生成模型工具箱

## 欢迎加入 OpenMMLab 社区

扫描下方的二维码可关注 OpenMMLab 团队的 [知乎官方账号](https://www.zhihu.com/people/openmmlab),加入 OpenMMLab 团队的 [官方交流 QQ 群](https://jq.qq.com/?_wv=1027&k=aCvMxdr3)

<div align="center">
<img src="/resources/zhihu_qrcode.jpg" height="400" />  <img src="/resources/qq_group_qrcode.jpg" height="400" />
</div>

我们会在 OpenMMLab 社区为大家

- 📢 分享 AI 框架的前沿核心技术
- 💻 解读 PyTorch 常用模块源码
- 📰 发布 OpenMMLab 的相关新闻
- 🚀 介绍 OpenMMLab 开发的前沿算法
- 🏃 获取更高效的问题答疑和意见反馈
- 🔥 提供与各行各业开发者充分交流的平台

干货满满 📘,等你来撩 💗,OpenMMLab 社区期待您的加入 👬


================================================
FILE: configs/_base_/datasets/cityscapes_detection.py
================================================
# dataset settings
# Cityscapes bbox-only detection config: multi-scale training on the
# "instancesonly" gtFine annotations, COCO-style bbox evaluation on val.
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# ImageNet mean/std; to_rgb=True converts BGR-loaded images to RGB first.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    # Multi-scale training between (2048, 800) and (2048, 1024).
    dict(
        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),  # pad to a multiple of 32 for FPN
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        flip=False,  # single scale, no flip augmentation at test time
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    # The Cityscapes train split is small; repeat it 8x so one "epoch"
    # iterates over each image 8 times.
    train=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root +
            'annotations/instancesonly_filtered_gtFine_train.json',
            img_prefix=data_root + 'leftImg8bit/train/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        img_prefix=data_root + 'leftImg8bit/val/',
        pipeline=test_pipeline),
    # NOTE(review): test-split annotations are not public; this entry is
    # usable for inference only.
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_test.json',
        img_prefix=data_root + 'leftImg8bit/test/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')


================================================
FILE: configs/_base_/datasets/cityscapes_instance.py
================================================
# dataset settings
# Cityscapes instance-segmentation config: same layout as
# cityscapes_detection.py but also loads masks and evaluates segm mAP.
dataset_type = 'CityscapesDataset'
data_root = 'data/cityscapes/'
# ImageNet mean/std; to_rgb=True converts BGR-loaded images to RGB first.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # Multi-scale training between (2048, 800) and (2048, 1024).
    dict(
        type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 1024),
        flip=False,  # single scale, no flip augmentation at test time
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=2,
    # Repeat the small train split 8x per epoch.
    train=dict(
        type='RepeatDataset',
        times=8,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root +
            'annotations/instancesonly_filtered_gtFine_train.json',
            img_prefix=data_root + 'leftImg8bit/train/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_val.json',
        img_prefix=data_root + 'leftImg8bit/val/',
        pipeline=test_pipeline),
    # NOTE(review): test-split annotations are not public; inference only.
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/instancesonly_filtered_gtFine_test.json',
        img_prefix=data_root + 'leftImg8bit/test/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/coco_detection.py
================================================
# dataset settings
# Standard COCO 2017 bbox detection config (train2017 / val2017).
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# ImageNet mean/std; to_rgb=True converts BGR-loaded images to RGB first.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),  # pad to a multiple of 32 for FPN
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,  # single scale, no flip augmentation at test time
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    # Uncomment the block below (and remove the val2017 test entry) to run
    # inference on test-dev2017 for server submission.
#     test=dict(
#         type=dataset_type,
#         ann_file=data_root + 'annotations/image_info_test-dev2017.json',
#         img_prefix=data_root + 'test2017/',
#         pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline)
)
evaluation = dict(interval=1, metric='bbox')


================================================
FILE: configs/_base_/datasets/coco_detection_tiny.py
================================================
# Tiny COCO variant for quick debugging: trains on a ~10k-image subset at a
# reduced (800, 480) scale and validates on a 99-image train subset.
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(800, 480), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(800, 480),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        # _delete_ only takes effect when this file overrides a _base_
        # config's train entry — NOTE(review): confirm intended base.
        _delete_=True,
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017_subset_9929.json',
            # ann_file=data_root + 'annotations/instances_train2017_subset_99.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)
    ),
    # Validation deliberately reuses a tiny train subset for fast sanity
    # checks (results are NOT comparable to real val2017 numbers).
    val=dict(
        type=dataset_type,
        # ann_file=data_root + 'annotations/instances_val2017.json',
        ann_file=data_root + 'annotations/instances_train2017_subset_99.json',
        img_prefix=data_root + 'train2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')


================================================
FILE: configs/_base_/datasets/coco_instance.py
================================================
# dataset settings
# Standard COCO 2017 instance-segmentation config: loads masks in addition
# to boxes and evaluates both bbox and segm mAP on val2017.
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# ImageNet mean/std; to_rgb=True converts BGR-loaded images to RGB first.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,  # single scale, no flip augmentation at test time
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/coco_instance_semantic.py
================================================
# dataset settings
# COCO instance segmentation + stuff/thing semantic segmentation (used by
# models with an auxiliary semantic branch, e.g. HTC).
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # with_seg loads the per-pixel semantic map from seg_prefix below.
    dict(
        type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    # Downscale the GT semantic map to 1/8 of the image resolution.
    dict(type='SegRescale', scale_factor=1 / 8),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            # NOTE(review): flip_ratio is unusual inside a flip=False TTA
            # wrapper (other configs omit it here); verify it is intentional.
            dict(type='RandomFlip', flip_ratio=0.5),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        seg_prefix=data_root + 'stuffthingmaps/train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/deepfashion.py
================================================
# dataset settings
# DeepFashion In-shop instance-segmentation config. Train/val both use the
# "query" annotations; test uses the "gallery" split.
dataset_type = 'DeepFashionDataset'
data_root = 'data/DeepFashion/In-shop/'
# ImageNet mean/std; to_rgb=True converts BGR-loaded images to RGB first.
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(750, 1101), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(750, 1101),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    # 'samples_per_gpu' replaces the deprecated 'imgs_per_gpu' key so this
    # file matches every other dataset config in _base_/datasets.
    samples_per_gpu=2,
    workers_per_gpu=1,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
        img_prefix=data_root + 'Img/',
        pipeline=train_pipeline,
        data_root=data_root),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/DeepFashion_segmentation_query.json',
        img_prefix=data_root + 'Img/',
        pipeline=test_pipeline,
        data_root=data_root),
    test=dict(
        type=dataset_type,
        ann_file=data_root +
        'annotations/DeepFashion_segmentation_gallery.json',
        img_prefix=data_root + 'Img/',
        pipeline=test_pipeline,
        data_root=data_root))
# Evaluate only every 5 epochs (dataset is small, eval is cheap but noisy).
evaluation = dict(interval=5, metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/lvis_v0.5_instance.py
================================================
# dataset settings
# LVIS v0.5 instance segmentation; pipelines are inherited from the COCO
# instance base config.
_base_ = 'coco_instance.py'
dataset_type = 'LVISV05Dataset'
data_root = 'data/lvis_v0.5/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        # Replace (not merge with) the base train dataset, wrapping it in
        # class-balanced oversampling for the long-tailed LVIS categories.
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v0.5_train.json',
            img_prefix=data_root + 'train2017/')),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v0.5_val.json',
        img_prefix=data_root + 'val2017/'))
evaluation = dict(metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/lvis_v1_instance.py
================================================
# dataset settings
# LVIS v1 instance segmentation; pipelines are inherited from the COCO
# instance base config.
_base_ = 'coco_instance.py'
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        # Replace (not merge with) the base train dataset, wrapping it in
        # class-balanced oversampling for the long-tailed LVIS categories.
        _delete_=True,
        type='ClassBalancedDataset',
        oversample_thr=1e-3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v1_train.json',
            # v1 file_names carry their own subfolder, so the prefix is just
            # the dataset root (unlike v0.5's train2017/val2017 prefixes).
            img_prefix=data_root)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root))
evaluation = dict(metric=['bbox', 'segm'])


================================================
FILE: configs/_base_/datasets/voc0712.py
================================================
# dataset settings
# Pascal VOC 07+12 detection: train on the union of both trainval splits,
# evaluate VOC-style mAP on the VOC2007 test split.
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    # Repeat the concatenated VOC2007+VOC2012 trainval sets 3x per epoch.
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            # Paired lists: ann_file[i] is looked up under img_prefix[i].
            ann_file=[
                data_root + 'VOC2007/ImageSets/Main/trainval.txt',
                data_root + 'VOC2012/ImageSets/Main/trainval.txt'
            ],
            img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))
# VOC-style mAP (IoU 0.5), not COCO bbox mAP.
evaluation = dict(interval=1, metric='mAP')


================================================
FILE: configs/_base_/datasets/wider_face.py
================================================
# dataset settings
# WIDER FACE detection with an SSD-300-style pipeline: photometric
# distortion, expand, IoU-constrained crop, then a fixed 300x300 resize.
dataset_type = 'WIDERFaceDataset'
data_root = 'data/WIDERFace/'
# NOTE(review): std=[1, 1, 1] (mean-subtraction only) is the SSD convention,
# unlike the ImageNet std used by the other dataset configs here.
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    # float32 is required by PhotoMetricDistortion below.
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Zoom-out: paste the image onto a mean-filled canvas up to 4x larger.
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    # Fixed-size (aspect-ratio-distorting) resize, as SSD expects.
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=60,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'train.txt',
            img_prefix=data_root + 'WIDER_train/',
            # Skip very small faces — presumably a pixel-size threshold;
            # TODO confirm against WIDERFaceDataset.
            min_size=17,
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'val.txt',
        img_prefix=data_root + 'WIDER_val/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'val.txt',
        img_prefix=data_root + 'WIDER_val/',
        pipeline=test_pipeline))


================================================
FILE: configs/_base_/default_runtime.py
================================================
# Runtime defaults shared by all configs: checkpointing, logging,
# distributed backend, and the train workflow.
checkpoint_config = dict(interval=1)  # save a checkpoint every epoch
# yapf:disable
log_config = dict(
    interval=50,  # log every 50 iterations
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# Warns if the model head's num_classes disagrees with the dataset classes.
custom_hooks = [dict(type='NumClassCheckHook')]

dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None    # checkpoint path to initialize weights from (no resume)
resume_from = None  # checkpoint path to resume full training state from
workflow = [('train', 1)]


================================================
FILE: configs/_base_/models/cascade_mask_rcnn_r50_fpn.py
================================================
# model settings
# Cascade Mask R-CNN with ResNet-50 + FPN: an RPN followed by three cascaded
# box heads trained at increasing IoU thresholds (0.5 / 0.6 / 0.7), plus a
# shared FCN mask head.
model = dict(
    type='CascadeRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,  # freeze stem + stage 1
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,   # keep BN in eval mode (stats frozen) during training
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        # Later stages contribute less to the total loss.
        stage_loss_weights=[1, 0.5, 0.25],
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One box head per cascade stage; regression target stds shrink per
        # stage as the box refinements become smaller.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ],
        # A single mask head is shared across all cascade stages.
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # One assigner/sampler per cascade stage, with the pos/neg IoU
        # threshold raised from 0.5 to 0.6 to 0.7.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                mask_size=28,
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))


================================================
FILE: configs/_base_/models/cascade_rcnn_r50_fpn.py
================================================
# model settings
#
# Cascade R-CNN with a ResNet-50 backbone and FPN neck: an RPN proposes
# regions, then three bbox heads refine them in sequence. The cascade stages
# use increasing IoU thresholds (0.5 -> 0.6 -> 0.7 in train_cfg.rcnn) and
# tightening regression target_stds (0.1/0.2 -> 0.05/0.1 -> 0.033/0.067),
# with stage losses weighted 1 / 0.5 / 0.25.
model = dict(
    type='CascadeRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # all four stage outputs feed the FPN
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),  # one stride per FPN level
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        type='CascadeRoIHead',
        num_stages=3,
        stage_loss_weights=[1, 0.5, 0.25],  # later stages contribute less loss
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        # One bbox head per cascade stage; identical except for the regression
        # target_stds, which shrink stage by stage as boxes get more accurate.
        bbox_head=[
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.1, 0.1, 0.2, 0.2]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.05, 0.05, 0.1, 0.1]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
                               loss_weight=1.0)),
            dict(
                type='Shared2FCBBoxHead',
                in_channels=256,
                fc_out_channels=1024,
                roi_feat_size=7,
                num_classes=80,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.033, 0.033, 0.067, 0.067]),
                reg_class_agnostic=True,
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=False,
                    loss_weight=1.0),
                loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
        ]),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        # One assigner/sampler config per cascade stage; IoU thresholds rise
        # 0.5 -> 0.6 -> 0.7 so each stage trains on progressively better boxes.
        rcnn=[
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.5,
                    neg_iou_thr=0.5,
                    min_pos_iou=0.5,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.6,
                    neg_iou_thr=0.6,
                    min_pos_iou=0.6,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.7,
                    match_low_quality=False,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=512,
                    pos_fraction=0.25,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=True),
                pos_weight=-1,
                debug=False)
        ]),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))


================================================
FILE: configs/_base_/models/fast_rcnn_r50_fpn.py
================================================
# model settings
#
# Fast R-CNN with a ResNet-50 + FPN backbone. Unlike Faster R-CNN there is no
# RPN head: proposals are expected to be supplied externally (only `rcnn`
# settings appear in train_cfg/test_cfg below).
model = dict(
    type='FastRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # all four stage outputs feed the FPN
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,  # COCO classes
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))


================================================
FILE: configs/_base_/models/faster_rcnn_r50_caffe_c4.py
================================================
# model settings
#
# Faster R-CNN "C4" variant with a caffe-style ResNet-50: the backbone stops
# after stage 3 (a single stride-16 feature map, no FPN), and ResNet's final
# stage is reused as the RoI head's shared conv head (`ResLayer` below).
# NOTE(review): this variable name becomes a top-level config key under mmcv
# Config, so it must not be renamed.
norm_cfg = dict(type='BN', requires_grad=False)  # BN fully frozen (caffe-style)
model = dict(
    type='FasterRCNN',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,  # truncated: stage 4 lives in the RoI shared_head
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),  # single output map at stride 16
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe'),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],  # multi-scale anchors on one level
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(
            type='ResLayer',  # ResNet stage 4 applied per-RoI
            depth=50,
            stage=3,
            stride=2,
            dilation=1,
            style='caffe',
            norm_cfg=norm_cfg,
            norm_eval=True),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,  # output width of the shared ResLayer
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,  # single-level RPN keeps many candidates pre-NMS
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=6000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))


================================================
FILE: configs/_base_/models/faster_rcnn_r50_caffe_dc5.py
================================================
# model settings
#
# Faster R-CNN "DC5" variant with a caffe-style ResNet-50: the last stage uses
# stride 1 with dilation 2 (see strides/dilations below), producing a single
# dilated stride-16 feature map. No FPN neck; anchors of several scales sit on
# that one level.
# NOTE(review): this variable name becomes a top-level config key under mmcv
# Config, so it must not be renamed.
norm_cfg = dict(type='BN', requires_grad=False)  # BN fully frozen (caffe-style)
model = dict(
    type='FasterRCNN',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        strides=(1, 2, 2, 1),  # last stage keeps stride 16 ...
        dilations=(1, 1, 1, 2),  # ... and compensates with dilation 2
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe'),
    rpn_head=dict(
        type='RPNHead',
        in_channels=2048,
        feat_channels=2048,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],  # multi-scale anchors on one level
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=2048,
            featmap_strides=[16]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=2048,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,  # single-level RPN keeps many candidates pre-NMS
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms=dict(type='nms', iou_threshold=0.7),
            nms_pre=6000,
            max_per_img=1000,
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)))


================================================
FILE: configs/_base_/models/faster_rcnn_r50_fpn.py
================================================
# model settings
#
# Standard Faster R-CNN with a ResNet-50 + FPN backbone/neck: an RPN proposes
# regions over five pyramid levels, and a single two-FC bbox head classifies
# and regresses the sampled RoIs.
model = dict(
    type='FasterRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # all four stage outputs feed the FPN
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],  # one scale per level; pyramid provides the rest
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,  # COCO classes
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100)
        # soft-nms is also supported for rcnn testing
        # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
    ))


================================================
FILE: configs/_base_/models/mask_rcnn_r50_caffe_c4.py
================================================
# model settings
#
# Mask R-CNN "C4" variant with a caffe-style ResNet-50: backbone truncated
# after stage 3 (single stride-16 map, no FPN); ResNet's final stage serves as
# the RoI head's shared conv head, feeding both the bbox head and a conv-free
# mask head (num_convs=0, mask_roi_extractor=None — masks are predicted from
# the shared-head features directly).
# NOTE(review): this variable name becomes a top-level config key under mmcv
# Config, so it must not be renamed.
norm_cfg = dict(type='BN', requires_grad=False)  # BN fully frozen (caffe-style)
model = dict(
    type='MaskRCNN',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,  # truncated: stage 4 lives in the RoI shared_head
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),  # single output map at stride 16
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=True,
        style='caffe'),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],  # multi-scale anchors on one level
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        shared_head=dict(
            type='ResLayer',  # ResNet stage 4 applied per-RoI
            depth=50,
            stage=3,
            stride=2,
            dilation=1,
            style='caffe',
            norm_cfg=norm_cfg,
            norm_eval=True),
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=1024,
            featmap_strides=[16]),
        bbox_head=dict(
            type='BBoxHead',
            with_avg_pool=True,
            roi_feat_size=7,
            in_channels=2048,  # output width of the shared ResLayer
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=None,  # reuse bbox RoI features via the shared head
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=0,  # no extra convs; shared-head output goes straight in
            in_channels=2048,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=12000,  # single-level RPN keeps many candidates pre-NMS
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=False,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=14,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=6000,
            nms=dict(type='nms', iou_threshold=0.7),
            max_per_img=1000,
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))


================================================
FILE: configs/_base_/models/mask_rcnn_r50_fpn.py
================================================
# model settings
#
# Standard Mask R-CNN with a ResNet-50 + FPN backbone/neck: Faster R-CNN's
# RPN + bbox head plus a parallel 4-conv FCN mask head with its own 14x14
# RoIAlign extractor.
model = dict(
    type='MaskRCNN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # all four stage outputs feed the FPN
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],  # one scale per level; pyramid provides the rest
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    roi_head=dict(
        type='StandardRoIHead',
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,  # COCO classes
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.5,
                min_pos_iou=0.5,
                match_low_quality=True,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True),
            mask_size=28,  # mask targets: 2x the 14x14 RoIAlign output
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=1000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='nms', iou_threshold=0.5),
            max_per_img=100,
            mask_thr_binary=0.5)))


================================================
FILE: configs/_base_/models/retinanet_r50_fpn.py
================================================
# model settings
#
# RetinaNet: single-stage detector with a ResNet-50 + FPN backbone/neck and a
# dense anchor head trained with focal loss. The FPN starts at level 1 and
# adds extra levels from the input (strides 8..128), so no stride-4 map is
# used.
model = dict(
    type='RetinaNet',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        start_level=1,  # skip the stride-4 level
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RetinaHead',
        num_classes=80,  # COCO classes
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            octave_base_scale=4,
            scales_per_octave=3,  # 3 scales x 3 ratios = 9 anchors per cell
            ratios=[0.5, 1.0, 2.0],
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='FocalLoss',  # handles fg/bg imbalance of dense anchors
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))


================================================
FILE: configs/_base_/models/rpn_r50_caffe_c4.py
================================================
# model settings
#
# Standalone RPN (proposal-only model) on a caffe-style ResNet-50 "C4"
# backbone: truncated after stage 3, single stride-16 feature map, no neck.
model = dict(
    type='RPN',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=3,  # truncated backbone, C4-style
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=(2, ),  # single output map at stride 16
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),  # BN fully frozen
        norm_eval=True,
        style='caffe'),
    neck=None,
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
        feat_channels=1024,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[2, 4, 8, 16, 32],  # multi-scale anchors on one level
            ratios=[0.5, 1.0, 2.0],
            strides=[16]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=12000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))


================================================
FILE: configs/_base_/models/rpn_r50_fpn.py
================================================
# model settings
#
# Standalone RPN (proposal-only model) on a ResNet-50 + FPN backbone/neck,
# with one anchor scale per pyramid level.
model = dict(
    type='RPN',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),  # all four stage outputs feed the FPN
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],  # matches ResNet-50 stage widths
        out_channels=256,
        num_outs=5),
    rpn_head=dict(
        type='RPNHead',
        in_channels=256,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            scales=[8],  # one scale per level; pyramid provides the rest
            ratios=[0.5, 1.0, 2.0],
            strides=[4, 8, 16, 32, 64]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[1.0, 1.0, 1.0, 1.0]),
        loss_cls=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.3,
                min_pos_iou=0.3,
                ignore_iof_thr=-1),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=0,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=1000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))


================================================
FILE: configs/_base_/models/ssd300.py
================================================
# model settings
# SSD300 detector: VGG16 (caffe-pretrained) backbone, no neck, multi-scale SSD head.
input_size = 300
model = dict(
    type='SingleStageDetector',
    pretrained='open-mmlab://vgg16_caffe',
    backbone=dict(
        type='SSDVGG',
        input_size=input_size,
        depth=16,
        with_last_pool=False,
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        l2_norm_scale=20),
    # SSD attaches the head directly to backbone feature maps; no FPN-style neck.
    neck=None,
    bbox_head=dict(
        type='SSDHead',
        # one entry per feature level used by the head
        in_channels=(512, 1024, 512, 256, 256, 256),
        num_classes=80,
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            input_size=input_size,
            basesize_ratio_range=(0.15, 0.9),
            strides=[8, 16, 32, 64, 100, 300],
            # per-level aspect ratios (in addition to 1:1)
            ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        # SSD-style hard negative mining: keep negatives at 3x positives
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
# fixed input size, so cudnn autotuning is beneficial
cudnn_benchmark = True


================================================
FILE: configs/_base_/schedules/schedule_1x.py
================================================
# optimizer
# Standard mmdetection "1x" schedule: 12 epochs with SGD.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    # decay LR at the end of epochs 8 and 11
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)


================================================
FILE: configs/_base_/schedules/schedule_20e.py
================================================
# optimizer
# "20e" schedule: 20 epochs with SGD.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    # decay LR at the end of epochs 16 and 19
    step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)


================================================
FILE: configs/_base_/schedules/schedule_2x.py
================================================
# optimizer
# Standard mmdetection "2x" schedule: 24 epochs with SGD.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    # decay LR at the end of epochs 16 and 22
    step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)


================================================
FILE: configs/adamixer/README.md
================================================


================================================
FILE: configs/adamixer/adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py
================================================
# AdaMixer with a deformable-conv ResNeXt-101 (32x4d) backbone; everything else
# is inherited from the R50 300-query crop+mstrain 3x config.
_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'

model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        # deformable convolutions in stages 2-4 ("dx101" in the filename)
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))


================================================
FILE: configs/adamixer/adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py
================================================
# AdaMixer R101 variant: only swaps in a ResNet-101 backbone, inheriting all
# other settings from the R50 300-query crop+mstrain 3x config.
_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'

model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/adamixer/adamixer_r101_mstrain_480-800_3x_coco.py
================================================
# AdaMixer R101 variant: only swaps in a ResNet-101 backbone, inheriting all
# other settings from the R50 mstrain 3x config.
_base_ = './adamixer_r50_mstrain_480-800_3x_coco.py'

model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/adamixer/adamixer_r50_1x_coco.py
================================================
def __get_debug():
    # Return True when the C_DEBUG environment variable is set.
    # Double-underscore name keeps it out of the parsed mmcv config dict.
    import os
    return 'C_DEBUG' in os.environ


# debug flag toggled externally via the C_DEBUG environment variable
debug = __get_debug()

log_interval = 100


_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
work_dir_prefix = 'work_dirs/adamixer_mmdet'

# single-scale training/testing resolution (max long side, short side)
IMAGE_SCALE = (1333, 800)

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# ImageNet mean/std in RGB order
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=IMAGE_SCALE, keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=IMAGE_SCALE,
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/image_info_test-dev2017.json',
        img_prefix=data_root + 'test2017/',
        pipeline=test_pipeline),
)
evaluation = dict(interval=1, metric='bbox')


# AdaMixer decoder hyper-parameters
num_stages = 6
num_query = 100
QUERY_DIM = 256
FEAT_DIM = 256
FF_DIM = 2048

# P_in for spatial mixing in the paper.
in_points_list = [32, ] * num_stages

# P_out for spatial mixing in the paper. Also named `out_points` in this codebase.
out_patterns_list = [128, ] * num_stages

# G for the mixer grouping in the paper. Please distinguish it from num_heads in MHSA in this codebase.
n_group_list = [4, ] * num_stages

# AdaMixer detector: ResNet-50 + channel-mapping neck + query-based decoder.
model = dict(
    type='QueryBased',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    # 1x1 channel projection of each backbone level (no top-down FPN fusion)
    neck=dict(
        type='ChannelMapping',
        in_channels=[256, 512, 1024, 2048],
        out_channels=FEAT_DIM,
        start_level=0,
        add_extra_convs='on_output',
        num_outs=4),
    # generates the initial learnable queries instead of a conventional RPN
    rpn_head=dict(
        type='InitialQueryGenerator',
        num_query=num_query,
        content_dim=QUERY_DIM),
    roi_head=dict(
        type='AdaMixerDecoder',
        featmap_strides=[4, 8, 16, 32],
        num_stages=num_stages,
        stage_loss_weights=[1] * num_stages,
        content_dim=QUERY_DIM,
        # one decoder stage config per cascade stage (all stages identical here)
        bbox_head=[
            dict(
                type='AdaMixerDecoderStage',
                num_classes=80,
                num_ffn_fcs=2,
                num_heads=8,
                num_cls_fcs=1,
                num_reg_fcs=1,
                feedforward_channels=FF_DIM,
                content_dim=QUERY_DIM,
                feat_channels=FEAT_DIM,
                dropout=0.0,
                in_points=in_points_list[stage_idx],
                out_points=out_patterns_list[stage_idx],
                n_groups=n_group_list[stage_idx],
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                loss_cls=dict(
                    type='FocalLoss',
                    use_sigmoid=True,
                    gamma=2.0,
                    alpha=0.25,
                    loss_weight=2.0),
                # NOTE: The following argument is a placeholder to hack the code. No real effects for decoding or updating bounding boxes.
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    clip_border=False,
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.5, 0.5, 1., 1.])) for stage_idx in range(num_stages)
        ]),
    # training and testing settings
    train_cfg=dict(
        rpn=None,
        # DETR-style one-to-one Hungarian matching at every decoder stage;
        # cost weights mirror the loss weights above
        rcnn=[
            dict(
                assigner=dict(
                    type='HungarianAssigner',
                    cls_cost=dict(type='FocalLossCost', weight=2.0),
                    reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                    iou_cost=dict(type='IoUCost', iou_mode='giou',
                                  weight=2.0)),
                sampler=dict(type='PseudoSampler'),
                pos_weight=1) for _ in range(num_stages)
        ]),
    # no NMS: every query yields at most one detection
    test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_query)))

# optimizer
# AdamW replaces the SGD optimizer inherited from schedule_1x (_delete_=True).
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.000025,
    weight_decay=0.0001,
)

# gradient clipping is required for stable query-based detector training
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=1.0, norm_type=2),
)

# learning policy
lr_config = dict(
    policy='step',
    step=[8, 11],
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001
)
runner = dict(type='EpochBasedRunner', max_epochs=12)


def __date():
    # Timestamp string (MMDD_HHMM) used to suffix run artifacts; the
    # double-underscore name keeps it out of the parsed mmcv config dict.
    import datetime
    return datetime.datetime.now().strftime('%m%d_%H%M')


log_config = dict(
    interval=log_interval,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ]
)

# per-run suffix, e.g. '_0312_1504'
postfix = '_' + __date()

find_unused_parameters = True


resume_from = None


================================================
FILE: configs/adamixer/adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py
================================================
# AdaMixer R50 with 300 queries, DETR-style crop augmentation, and a 3x schedule.
_base_ = './adamixer_r50_mstrain_480-800_3x_coco.py'
num_query = 300
model = dict(
    rpn_head=dict(num_query=num_query),
    test_cfg=dict(
        _delete_=True, rpn=None, rcnn=dict(max_per_img=num_query)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# augmentation strategy originates from DETR.
# Either (a) plain multi-scale resize, or (b) resize -> random crop -> resize.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[[
            dict(
                type='Resize',
                img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333),
                           (608, 1333), (640, 1333), (672, 1333), (704, 1333),
                           (736, 1333), (768, 1333), (800, 1333)],
                multiscale_mode='value',
                keep_ratio=True)
        ],
            [
            dict(
                type='Resize',
                img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                multiscale_mode='value',
                keep_ratio=True),
            dict(
                type='RandomCrop',
                crop_type='absolute_range',
                crop_size=(384, 600),
                allow_negative_crop=True),
            dict(
                type='Resize',
                img_scale=[(480, 1333), (512, 1333), (544, 1333),
                           (576, 1333), (608, 1333), (640, 1333),
                           (672, 1333), (704, 1333), (736, 1333),
                           (768, 1333), (800, 1333)],
                multiscale_mode='value',
                override=True,
                keep_ratio=True)
        ]]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))

# 3x schedule: 36 epochs, LR decayed at epochs 24 and 33
lr_config = dict(policy='step', step=[24, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)


================================================
FILE: configs/adamixer/adamixer_r50_mstrain_480-800_3x_coco.py
================================================
# AdaMixer R50 with multi-scale training (short side 480-800) and a 3x schedule.
_base_ = './adamixer_r50_1x_coco.py'

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# candidate short sides sampled per image during training
min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, value) for value in min_values],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]

data = dict(train=dict(pipeline=train_pipeline))
# 3x schedule: 36 epochs, LR decayed at epochs 27 and 33
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)


================================================
FILE: configs/adamixer/adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py
================================================
# AdaMixer with a Swin-Small backbone loaded from a local checkpoint.
_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'
# expected to exist locally; download the official Swin-S checkpoint beforehand
pretrained = './swin_small_patch4_window7_224.pth'
model = dict(
    # backbone weights come from init_cfg below, not from model-level pretrained
    pretrained=None,
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 18, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained),
    ),
    # Swin-S stage output channels replace the ResNet ones
    neck=dict(in_channels=[96, 192, 384, 768])
)

optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.000025,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            # Swin-related settings: no weight decay on pos embeds / norms
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }
    )
)

# longer warmup for the transformer backbone
lr_config = dict(warmup_iters=1000)


================================================
FILE: configs/albu_example/README.md
================================================
# Albu Example

<!-- [OTHERS] -->

```
@article{2018arXiv180906839B,
  author = {A. Buslaev, A. Parinov, E. Khvedchenya, V.~I. Iglovikov and A.~A. Kalinin},
  title = "{Albumentations: fast and flexible image augmentations}",
  journal = {ArXiv e-prints},
  eprint = {1809.06839},
  year = 2018
}
```

## Results and Models

| Backbone  | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
| R-50      | pytorch | 1x      | 4.4      | 16.6           |  38.0  | 34.5    |[config](https://github.com/open-mmlab/mmdetection/tree/master/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208-ab203bcd.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/albu_example/mask_rcnn_r50_fpn_albu_1x_coco/mask_rcnn_r50_fpn_albu_1x_coco_20200208_225520.log.json) |


================================================
FILE: configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py
================================================
# Mask R-CNN R50-FPN 1x with Albumentations photometric/geometric augmentation.
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# Albumentations transforms applied inside the `Albu` pipeline step below
albu_train_transforms = [
    dict(
        type='ShiftScaleRotate',
        shift_limit=0.0625,
        scale_limit=0.0,
        rotate_limit=0,
        interpolation=1,
        p=0.5),
    dict(
        type='RandomBrightnessContrast',
        brightness_limit=[0.1, 0.3],
        contrast_limit=[0.1, 0.3],
        p=0.2),
    # apply exactly one of RGBShift / HueSaturationValue
    dict(
        type='OneOf',
        transforms=[
            dict(
                type='RGBShift',
                r_shift_limit=10,
                g_shift_limit=10,
                b_shift_limit=10,
                p=1.0),
            dict(
                type='HueSaturationValue',
                hue_shift_limit=20,
                sat_shift_limit=30,
                val_shift_limit=20,
                p=1.0)
        ],
        p=0.1),
    dict(type='JpegCompression', quality_lower=85, quality_upper=95, p=0.2),
    dict(type='ChannelShuffle', p=0.1),
    # apply exactly one of Blur / MedianBlur
    dict(
        type='OneOf',
        transforms=[
            dict(type='Blur', blur_limit=3, p=1.0),
            dict(type='MedianBlur', blur_limit=3, p=1.0)
        ],
        p=0.1),
]
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='Pad', size_divisor=32),
    dict(
        type='Albu',
        transforms=albu_train_transforms,
        bbox_params=dict(
            type='BboxParams',
            format='pascal_voc',
            label_fields=['gt_labels'],
            min_visibility=0.0,
            filter_lost_elements=True),
        # maps mmdet result keys to Albumentations' expected keys
        keymap={
            'img': 'image',
            'gt_masks': 'masks',
            'gt_bboxes': 'bboxes'
        },
        update_pad_shape=False,
        skip_img_without_anno=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'],
        meta_keys=('filename', 'ori_shape', 'img_shape', 'img_norm_cfg',
                   'pad_shape', 'scale_factor'))
]
data = dict(train=dict(pipeline=train_pipeline))


================================================
FILE: configs/atss/README.md
================================================
# Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection

## Introduction

<!-- [ALGORITHM] -->

```latex
@article{zhang2019bridging,
  title   =  {Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection},
  author  =  {Zhang, Shifeng and Chi, Cheng and Yao, Yongqiang and Lei, Zhen and Li, Stan Z.},
  journal =  {arXiv preprint arXiv:1912.02424},
  year    =  {2019}
}
```

## Results and Models

| Backbone  | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
|:---------:|:-------:|:-------:|:--------:|:--------------:|:------:|:------:|:--------:|
| R-50      | pytorch | 1x      | 3.7      | 19.7           |  39.4  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209-985f7bd0.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r50_fpn_1x_coco/atss_r50_fpn_1x_coco_20200209_102539.log.json) |
| R-101     | pytorch | 1x      | 5.6      | 12.3           |  41.5  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/atss/atss_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/atss/atss_r101_fpn_1x_coco/atss_r101_fpn_1x_20200825-dfcadd6f.log.json) |


================================================
FILE: configs/atss/atss_r101_fpn_1x_coco.py
================================================
# ATSS with a ResNet-101 backbone; all other settings inherited from the R50 config.
_base_ = './atss_r50_fpn_1x_coco.py'
model = dict(
    pretrained='torchvision://resnet101',
    backbone=dict(depth=101),
)


================================================
FILE: configs/atss/atss_r50_fpn_1x_coco.py
================================================
# ATSS detector: R50-FPN with adaptive training sample selection (1x schedule).
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    pretrained='torchvision://resnet50',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        # a single square anchor per location, as in the ATSS paper
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        # adaptive IoU-threshold assignment from the top-k candidates per level
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
# overrides the base schedule's lr=0.02
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)


================================================
FILE: configs/autoassign/README.md
================================================
# AutoAssign: Differentiable Label Assignment for Dense Object Detection

## Introduction

<!-- [ALGORITHM] -->

```
@article{zhu2020autoassign,
  title={AutoAssign: Differentiable Label Assignment for Dense Object Detection},
  author={Zhu, Benjin and Wang, Jianfeng and Jiang, Zhengkai and Zong, Fuhang and Liu, Songtao and Li, Zeming and Sun, Jian},
  journal={arXiv preprint arXiv:2007.03496},
  year={2020}
}
```

## Results and Models

| Backbone  | Style   | Lr schd | Mem (GB) |   box AP | Config | Download |
|:---------:|:-------:|:-------:|:--------:|:------:|:------:|:--------:|
| R-50     | pytorch | 1x      | 4.08      |   40.4  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py)       |[model](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.pth) &#124; [log](https://download.openmmlab.com/mmdetection/v2.0/autoassign/auto_assign_r50_fpn_1x_coco/auto_assign_r50_fpn_1x_coco_20210413_115540-5e17991f.log.json) |

**Note**:

1. We find that the performance is unstable with 1x setting and may fluctuate by about 0.3 mAP. mAP 40.3 ~ 40.6 is acceptable. Such fluctuation can also be found in the original implementation.
2. You can get more stable results (~ mAP 40.6) with a 13-epoch schedule, in which the learning rate is divided by 10 at the 10th and 13th epochs.


================================================
FILE: configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py
================================================
# We follow the original implementation which
# adopts the Caffe pre-trained backbone.
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='AutoAssign',
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        extra_convs_on_inputs=True,
        num_outs=5,
        relu_before_extra_convs=True,
        init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')),
    bbox_head=dict(
        type='AutoAssignHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        loss_bbox=dict(type='GIoULoss', loss_weight=5.0)),
    train_cfg=None,
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
img_norm_cfg = dict(
    mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(lr=0.01, paramwise_cfg=dict(norm_decay_mult=0.))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])
total_epochs = 12


================================================
FILE: configs/carafe/README.md
================================================
# CARAFE: Content-Aware ReAssembly of FEatures

## Introduction

<!-- [ALGORITHM] -->

We provide config files to reproduce the object detection & instance segmentation results in the ICCV 2019 Oral paper for [CARAFE: Content-Aware ReAssembly of FEatures](https://arxiv.org/abs/1905.02188).

```
@inproceedings{Wang_2019_ICCV,
    title = {CARAFE: Content-Aware ReAssembly of FEatures},
    author = {Wang, Jiaqi and Chen, Kai and Xu, Rui and Liu, Ziwei and Loy, Chen Change and Lin, Dahua},
    booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
    month = {October},
    year = {2019}
}
```

## Results and Models

The results on COCO 2017 val is shown in the below table.

| Method               | Backbone | Style   | Lr schd | Test Proposal Num | Inf time (fps) | Box AP | Mask AP | Config | Download |
|:--------------------:|:--------:|:-------:|:-------:|:-----------------:|:--------------:|:------:|:-------:|:------:|:--------:|
| Faster R-CNN w/ CARAFE | R-50-FPN | pytorch | 1x      | 1000 | 16.5 | 38.6   | 38.6       | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.386_20200504_175733-385a75b7.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/carafe/faster_rcnn_r50_fpn_carafe_1x_coco/faster_rcnn_r50_fpn_carafe_1x_coco_20200504_175733.log.json) |
| -                      |    -     |  -      | -       | 2000 |      |        |            |  |
| Mask R-CNN w/ CARAFE   | R-50-FPN | pytorch | 1x      | 1000 | 14.0 | 39.3   | 35.8       | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_bbox_mAP-0.393__segm_mAP-0.358_20200503_135957-8687f195.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/carafe/mask_rcnn_r50_fpn_carafe_1x_coco/mask_rcnn_r50_fpn_carafe_1x_coco_20200503_135957.log.json) |
| -                      |   -      |  -      |   -     | 2000 |      |        |            |  |

## Implementation

The CUDA implementation of CARAFE can be found at https://github.com/myownskyW7/CARAFE.


================================================
FILE: configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py
================================================
# Faster R-CNN R50 with CARAFE content-aware upsampling in the FPN.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    # padded to a multiple of 64 (instead of the usual 32) for CARAFE
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))


================================================
FILE: configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py
================================================
# Mask R-CNN R50 with CARAFE upsampling in both the FPN and the mask head.
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    neck=dict(
        type='FPN_CARAFE',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=0,
        end_level=-1,
        norm_cfg=None,
        act_cfg=None,
        order=('conv', 'norm', 'act'),
        upsample_cfg=dict(
            type='carafe',
            up_kernel=5,
            up_group=1,
            encoder_kernel=3,
            encoder_dilation=1,
            compressed_channels=64)),
    roi_head=dict(
        mask_head=dict(
            # CARAFE replaces the default deconv upsampling in the mask head
            upsample_cfg=dict(
                type='carafe',
                scale_factor=2,
                up_kernel=5,
                up_group=1,
                encoder_kernel=3,
                encoder_dilation=1,
                compressed_channels=64))))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    # padded to a multiple of 64 (instead of the usual 32) for CARAFE
    dict(type='Pad', size_divisor=64),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))


================================================
FILE: configs/cascade_rcnn/README.md
================================================
# Cascade R-CNN: High Quality Object Detection and Instance Segmentation

## Introduction

<!-- [ALGORITHM] -->

```latex
@article{Cai_2019,
   title={Cascade R-CNN: High Quality Object Detection and Instance Segmentation},
   ISSN={1939-3539},
   url={http://dx.doi.org/10.1109/tpami.2019.2956516},
   DOI={10.1109/tpami.2019.2956516},
   journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
   publisher={Institute of Electrical and Electronics Engineers (IEEE)},
   author={Cai, Zhaowei and Vasconcelos, Nuno},
   year={2019},
   pages={1--1}
}
```

## Results and models

### Cascade R-CNN

|    Backbone     |  Style  | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: |:------:|:--------:|
|    R-50-FPN     |  caffe  |   1x    |   4.2    |                |  40.4  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.404_20200504_174853-b857be87.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco/cascade_rcnn_r50_caffe_fpn_1x_coco_20200504_174853.log.json) |
|    R-50-FPN     | pytorch |   1x    |   4.4    |      16.1      |  40.3  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316-3dc56deb.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco/cascade_rcnn_r50_fpn_1x_coco_20200316_214748.log.json) |
|    R-50-FPN     | pytorch |   20e   |  -       |      -         | 41.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_bbox_mAP-0.41_20200504_175131-e9872a90.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco/cascade_rcnn_r50_fpn_20e_coco_20200504_175131.log.json) |
|    R-101-FPN    |  caffe  |   1x    |  6.2     |                | 42.3   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.423_20200504_175649-cab8dbd5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco/cascade_rcnn_r101_caffe_fpn_1x_coco_20200504_175649.log.json) |
|    R-101-FPN    | pytorch |   1x    |   6.4    |      13.5      |  42.0  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317-0b6a2fbf.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco/cascade_rcnn_r101_fpn_1x_coco_20200317_101744.log.json) |
|    R-101-FPN    | pytorch |   20e   |   -      |      -         |  42.5  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_bbox_mAP-0.425_20200504_231812-5057dcc5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco/cascade_rcnn_r101_fpn_20e_coco_20200504_231812.log.json) |
| X-101-32x4d-FPN | pytorch |   1x    |   7.6    |      10.9      |  43.7  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316-95c2deb6.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco/cascade_rcnn_x101_32x4d_fpn_1x_coco_20200316_055608.log.json) |
| X-101-32x4d-FPN | pytorch |   20e   |  7.6     |                | 43.7   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608-9ae0a720.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco/cascade_rcnn_x101_32x4d_fpn_20e_coco_20200906_134608.log.json) |
| X-101-64x4d-FPN | pytorch |   1x    |  10.7    |                | 44.7   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702-43ce6a30.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco/cascade_rcnn_x101_64x4d_fpn_1x_coco_20200515_075702.log.json) |
| X-101-64x4d-FPN | pytorch |   20e   |  10.7    |                | 44.5   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357-051557b1.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco/cascade_rcnn_x101_64x4d_fpn_20e_coco_20200509_224357.log.json)|

### Cascade Mask R-CNN

|    Backbone     |  Style  | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :------: | :--------: |
|    R-50-FPN     |  caffe  |   1x    |  5.9     |                | 41.2   | 36.0    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.412__segm_mAP-0.36_20200504_174659-5004b251.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco/cascade_mask_rcnn_r50_caffe_fpn_1x_coco_20200504_174659.log.json) |
|    R-50-FPN     | pytorch |   1x    |  6.0     |  11.2          | 41.2   | 35.9    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203_170449.log.json) |
|    R-50-FPN     | pytorch |   20e   |  -       | -              | 41.9   | 36.5    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_bbox_mAP-0.419__segm_mAP-0.365_20200504_174711-4af8e66e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco/cascade_mask_rcnn_r50_fpn_20e_coco_20200504_174711.log.json)|
|    R-101-FPN    |  caffe  |   1x    |  7.8     |                | 43.2   | 37.6    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.432__segm_mAP-0.376_20200504_174813-5c1e9599.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco/cascade_mask_rcnn_r101_caffe_fpn_1x_coco_20200504_174813.log.json)|
|    R-101-FPN    | pytorch |   1x    |  7.9     |  9.8           | 42.9   | 37.3    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203-befdf6ee.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco/cascade_mask_rcnn_r101_fpn_1x_coco_20200203_092521.log.json) |
|    R-101-FPN    | pytorch |   20e   |  -       |  -             | 43.4   | 37.8    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_bbox_mAP-0.434__segm_mAP-0.378_20200504_174836-005947da.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco/cascade_mask_rcnn_r101_fpn_20e_coco_20200504_174836.log.json)|
| X-101-32x4d-FPN | pytorch |   1x    |  9.2     |  8.6           | 44.3   | 38.3    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201-0f411b1f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco_20200201_052416.log.json) |
| X-101-32x4d-FPN | pytorch |   20e   |  9.2     |   -            | 45.0   | 39.0    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917-ed1f4751.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco_20200528_083917.log.json) |
| X-101-64x4d-FPN | pytorch |   1x    |  12.2    |  6.7           | 45.3   | 39.2    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203-9a2db89d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco_20200203_044059.log.json) |
| X-101-64x4d-FPN | pytorch |   20e   |  12.2    |                | 45.6   | 39.5    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033-bdb5126a.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco_20200512_161033.log.json)|

**Notes:**

- The `20e` schedule in Cascade (Mask) R-CNN indicates decreasing the lr at 16 and 19 epochs, with a total of 20 epochs.


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
================================================
# Cascade Mask R-CNN with a ResNet-101 (caffe-style) backbone: inherit the
# R-50 caffe config and swap only the backbone depth and pretrained weights.
_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py
================================================
# Cascade Mask R-CNN R-101 (pytorch style), 1x schedule: only the backbone
# depth and pretrained checkpoint differ from the R-50 base.
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py
================================================
# Cascade Mask R-CNN R-101 (pytorch style), 20e schedule: only the backbone
# depth and pretrained checkpoint differ from the R-50 base.
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
================================================
# Caffe-style variant of Cascade Mask R-CNN R-50: frozen BN backbone and
# caffe image normalization (BGR, mean subtraction only).
_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']

model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        # Freeze BN statistics and affine params, as the caffe pretrained
        # weights expect.
        norm_cfg=dict(requires_grad=False), norm_eval=True, style='caffe'))

# Caffe normalization: mean subtraction only (std of 1), images kept in BGR.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Re-declare pipelines so the caffe img_norm_cfg takes effect for all splits.
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py
================================================
# Baseline Cascade Mask R-CNN R-50-FPN, 1x schedule: composed entirely from
# the shared base configs (model, dataset, schedule, runtime).
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py
================================================
# Cascade Mask R-CNN R-50-FPN with the 20-epoch schedule (lr drops at epochs
# 16 and 19 per the schedule_20e base).
_base_ = [
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_20e.py', '../_base_/default_runtime.py'
]


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
================================================
# Cascade Mask R-CNN with ResNeXt-101 32x4d backbone, 1x schedule.
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (32 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
================================================
# Cascade Mask R-CNN with ResNeXt-101 32x4d backbone, 20e schedule.
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (32 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
================================================
# Cascade Mask R-CNN with ResNeXt-101 64x4d backbone, 1x schedule.
_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (64 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
================================================
# Cascade Mask R-CNN with ResNeXt-101 64x4d backbone, 20e schedule.
_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (64 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py
================================================
# Cascade R-CNN with ResNet-101 (caffe-style) backbone: inherit the R-50
# caffe config and swap only the depth and pretrained weights.
_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet101_caffe',
    backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py
================================================
# Cascade R-CNN R-101 (pytorch style), 1x schedule.
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py
================================================
# Cascade R-CNN R-101 (pytorch style), 20e schedule.
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py
================================================
# Caffe-style variant of Cascade R-CNN R-50: frozen BN backbone and caffe
# image normalization (BGR, mean subtraction only).
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'

model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(norm_cfg=dict(requires_grad=False), style='caffe'))

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
# Detection-only pipeline: boxes are loaded but no instance masks.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Re-declare pipelines so the caffe img_norm_cfg takes effect for all splits.
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py
================================================
# Baseline Cascade R-CNN R-50-FPN, 1x schedule: composed entirely from the
# shared base configs (model, dataset, schedule, runtime).
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py
================================================
# 20-epoch variant of the 1x config: same model/dataset, longer schedule.
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
# learning policy: decay lr at epochs 16 and 19, train for 20 epochs total.
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py
================================================
# Cascade R-CNN with ResNeXt-101 32x4d backbone, 1x schedule.
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (32 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py
================================================
# Cascade R-CNN with ResNeXt-101 32x4d backbone, 20e schedule.
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (32 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py
================================================
# Cascade R-CNN with ResNeXt-101 64x4d backbone, 1x schedule.
_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'
model = dict(
    # NOTE(review): 'type' repeats the detector type presumably already set in
    # the base config (the 32x4d sibling configs omit it); harmless but
    # redundant — confirm against the base model config before removing.
    type='CascadeRCNN',
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (64 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py
================================================
# Cascade R-CNN with ResNeXt-101 64x4d backbone, 20e schedule.
_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    # NOTE(review): 'type' repeats the detector type presumably already set in
    # the base config (the 32x4d sibling configs omit it); harmless but
    # redundant — confirm against the base model config before removing.
    type='CascadeRCNN',
    pretrained='open-mmlab://resnext101_64x4d',
    backbone=dict(
        # Replace the ResNet backbone with ResNeXt (64 groups, width 4).
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch'))


================================================
FILE: configs/cascade_rpn/README.md
================================================
# Cascade RPN

<!-- [ALGORITHM] -->

We provide the code for reproducing experiment results of [Cascade RPN](https://arxiv.org/abs/1909.06720).

```
@inproceedings{vu2019cascade,
  title={Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution},
  author={Vu, Thang and Jang, Hyunjun and Pham, Trung X and Yoo, Chang D},
  booktitle={Conference on Neural Information Processing Systems (NeurIPS)},
  year={2019}
}
```

## Benchmark

### Region proposal performance

| Method | Backbone | Style | Mem (GB) | Train time (s/iter) | Inf time (fps) | AR 1000 |                Download                |
|:------:|:--------:|:-----:|:--------:|:-------------------:|:--------------:|:-------:|:--------------------------------------:|
|  CRPN  | R-50-FPN | caffe |     -    |          -          |        -       |   72.0  | [model](https://drive.google.com/file/d/1qxVdOnCgK-ee7_z0x6mvAir_glMu2Ihi/view?usp=sharing) |

### Detection performance

|     Method    |   Proposal  | Backbone |  Style  | Schedule | Mem (GB) | Train time (s/iter) | Inf time (fps) | box AP |                   Download                   |
|:-------------:|:-----------:|:--------:|:-------:|:--------:|:--------:|:-------------------:|:--------------:|:------:|:--------------------------------------------:|
|   Fast R-CNN  | Cascade RPN | R-50-FPN |  caffe  |    1x    |    -     |          -          |        -       |  39.9  | [model](https://drive.google.com/file/d/1NmbnuY5VHi8I9FE8xnp5uNvh2i-t-6_L/view?usp=sharing) |
|  Faster R-CNN | Cascade RPN | R-50-FPN |  caffe  |    1x    |    -     |          -          |        -       |  40.4  | [model](https://drive.google.com/file/d/1dS3Q66qXMJpcuuQgDNkLp669E5w1UMuZ/view?usp=sharing) |


================================================
FILE: configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py
================================================
# Fast R-CNN trained on precomputed Cascade RPN proposals (loaded from .pkl
# files) with a caffe-style frozen-BN R-50 backbone.
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    pretrained='open-mmlab://detectron2/resnet50_caffe',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe'),
    roi_head=dict(
        bbox_head=dict(
            # Tighter regression target stds than the Fast R-CNN defaults,
            # matching the higher-quality Cascade RPN proposals.
            bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            # Higher IoU threshold (0.65) for positives than the usual 0.5.
            assigner=dict(
                pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
            sampler=dict(num=256))),
    test_cfg=dict(rcnn=dict(score_thr=1e-3)))
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
# Caffe normalization: mean subtraction only (std of 1), images kept in BGR.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    # At most 300 precomputed proposals are used per image.
    dict(type='LoadProposals', num_max_proposals=300),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=300),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            # Proposals must be tensorized and wrapped so they ride along
            # with the image batch.
            dict(type='ToTensor', keys=['proposals']),
            dict(
                type='ToDataContainer',
                fields=[dict(key='proposals', stack=False)]),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
# Point each split at its Cascade RPN proposal file.
data = dict(
    train=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_train2017.pkl',
        pipeline=train_pipeline),
    val=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
        pipeline=test_pipeline),
    test=dict(
        proposal_file=data_root +
        'proposals/crpn_r50_caffe_fpn_1x_val2017.pkl',
        pipeline=test_pipeline))
# Replace the base optimizer_config entirely; clip gradients to stabilize
# training.
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
================================================
# Faster R-CNN whose RPN is replaced by a two-stage Cascade RPN head; the
# RPN losses are down-weighted by rpn_weight.
_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'
# Scale factor applied to every RPN loss term below.
rpn_weight = 0.7
model = dict(
    rpn_head=dict(
        # _delete_ discards the base RPNHead config instead of merging into it.
        _delete_=True,
        type='CascadeRPNHead',
        num_stages=2,
        stages=[
            # Stage 1: anchor-free-style refinement — single anchor per
            # location, regression only (with_cls=False), dilated adaption.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                # Feed the adapted feature to the next stage.
                bridged_feature=True,
                sampling=False,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight)),
            # Stage 2: classification + regression with offset-based adaption
            # on the stage-1 refined anchors.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                sampling=True,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss',
                    use_sigmoid=True,
                    loss_weight=1.0 * rpn_weight),
                loss_bbox=dict(
                    type='IoULoss', linear=True,
                    loss_weight=10.0 * rpn_weight))
        ]),
    roi_head=dict(
        bbox_head=dict(
            # Tighter regression target stds, matching the higher-quality
            # Cascade RPN proposals.
            bbox_coder=dict(target_stds=[0.04, 0.04, 0.08, 0.08]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.5),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        # One train config entry per RPN stage.
        rpn=[
            dict(
                assigner=dict(
                    type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
                allowed_border=-1,
                pos_weight=-1,
                debug=False),
            dict(
                assigner=dict(
                    type='MaxIoUAssigner',
                    pos_iou_thr=0.7,
                    neg_iou_thr=0.7,
                    min_pos_iou=0.3,
                    ignore_iof_thr=-1),
                sampler=dict(
                    type='RandomSampler',
                    num=256,
                    pos_fraction=0.5,
                    neg_pos_ub=-1,
                    add_gt_as_proposals=False),
                allowed_border=-1,
                pos_weight=-1,
                debug=False)
        ],
        # Only 300 proposals per image (vs the usual 1000) — Cascade RPN
        # proposals are higher quality.
        rpn_proposal=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
        rcnn=dict(
            assigner=dict(
                pos_iou_thr=0.65, neg_iou_thr=0.65, min_pos_iou=0.65),
            sampler=dict(type='RandomSampler', num=256))),
    test_cfg=dict(
        rpn=dict(max_per_img=300, nms=dict(iou_threshold=0.8)),
        rcnn=dict(score_thr=1e-3)))
# Replace the base optimizer_config entirely; clip gradients to stabilize
# training.
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py
================================================
# Standalone two-stage Cascade RPN (region proposal only, no detector head),
# built on the caffe-style RPN R-50 base config.
_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'
model = dict(
    rpn_head=dict(
        # _delete_ discards the base RPNHead config instead of merging into it.
        _delete_=True,
        type='CascadeRPNHead',
        num_stages=2,
        stages=[
            # Stage 1: single anchor per location, regression only
            # (with_cls=False), dilated adaption.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                anchor_generator=dict(
                    type='AnchorGenerator',
                    scales=[8],
                    ratios=[1.0],
                    strides=[4, 8, 16, 32, 64]),
                adapt_cfg=dict(type='dilation', dilation=3),
                # Feed the adapted feature to the next stage.
                bridged_feature=True,
                sampling=False,
                with_cls=False,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.1, 0.1, 0.5, 0.5)),
                loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0)),
            # Stage 2: classification + regression with offset-based adaption
            # on the stage-1 refined anchors.
            dict(
                type='StageCascadeRPNHead',
                in_channels=256,
                feat_channels=256,
                adapt_cfg=dict(type='offset'),
                bridged_feature=False,
                sampling=True,
                with_cls=True,
                reg_decoded_bbox=True,
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    target_means=(.0, .0, .0, .0),
                    target_stds=(0.05, 0.05, 0.1, 0.1)),
                loss_cls=dict(
                    type='CrossEntropyLoss', use_sigmoid=True,
                    loss_weight=1.0),
                loss_bbox=dict(type='IoULoss', linear=True, loss_weight=10.0))
        ]),
    # One train config entry per RPN stage.
    train_cfg=dict(rpn=[
        dict(
            assigner=dict(
                type='RegionAssigner', center_ratio=0.2, ignore_ratio=0.5),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.7,
                neg_iou_thr=0.7,
                min_pos_iou=0.3,
                ignore_iof_thr=-1,
                iou_calculator=dict(type='BboxOverlaps2D')),
            sampler=dict(
                type='RandomSampler',
                num=256,
                pos_fraction=0.5,
                neg_pos_ub=-1,
                add_gt_as_proposals=False),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)
    ]),
    # Keep up to 2000 proposals per image for AR-style proposal evaluation.
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.8),
            min_bbox_size=0)))
# Replace the base optimizer_config entirely; clip gradients to stabilize
# training.
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))


================================================
FILE: configs/centripetalnet/README.md
================================================
# CentripetalNet

## Introduction

<!-- [ALGORITHM] -->

```latex
@InProceedings{Dong_2020_CVPR,
author = {Dong, Zhiwei and Li, Guoxuan and Liao, Yue and Wang, Fei and Ren, Pengju and Qian, Chen},
title = {CentripetalNet: Pursuing High-Quality Keypoint Pairs for Object Detection},
booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {June},
year = {2020}
}
```

## Results and models

| Backbone        | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :--------: |:----------------: | :------: | :------------: | :----: | :------: | :--------: |
| HourglassNet-104 | [16 x 6](./centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | 190/210 | 16.7 | 3.7 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804-3ccc61e5.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco/centripetalnet_hourglass104_mstest_16x6_210e_coco_20200915_204804.log.json) |

Note:

- TTA setting is single-scale and `flip=True`.
- The model we released is the best checkpoint rather than the latest checkpoint (box AP 44.8 vs 44.6 in our experiment).


================================================
FILE: configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py
================================================
_base_ = [
    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]

# model settings
# CentripetalNet reuses the CornerNet detector wrapper and the Hourglass-104
# backbone; the difference is the head (CentripetalHead) below.
model = dict(
    type='CornerNet',
    backbone=dict(
        type='HourglassNet',
        downsample_times=5,
        num_stacks=2,
        stage_channels=[256, 256, 384, 384, 384, 512],
        stage_blocks=[2, 2, 2, 2, 2, 4],
        norm_cfg=dict(type='BN', requires_grad=True)),
    # Hourglass outputs are consumed directly by the head; no FPN-style neck.
    neck=None,
    bbox_head=dict(
        type='CentripetalHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,
        # No associative-embedding branch: corner grouping is handled by the
        # guiding/centripetal shift losses below instead.
        corner_emb_channels=0,
        loss_heatmap=dict(
            type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1),
        loss_guiding_shift=dict(
            type='SmoothL1Loss', beta=1.0, loss_weight=0.05),
        loss_centripetal_shift=dict(
            type='SmoothL1Loss', beta=1.0, loss_weight=1)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        corner_topk=100,
        local_maximum_kernel=3,
        distance_threshold=0.5,
        score_thr=0.05,
        max_per_img=100,
        nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Random scale-and-crop to a fixed 511x511 training canvas.
    dict(
        type='RandomCenterCropPad',
        crop_size=(511, 511),
        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
        test_mode=False,
        test_pad_mode=None,
        **img_norm_cfg),
    dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# TTA: single scale (scale_factor=1.0) with horizontal flip; images are padded
# to a valid size (test_pad_mode=['logical_or', 127]) rather than resized.
test_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='MultiScaleFlipAug',
        scale_factor=1.0,
        flip=True,
        transforms=[
            dict(type='Resize'),
            dict(
                type='RandomCenterCropPad',
                crop_size=None,
                ratios=None,
                border=None,
                test_mode=True,
                test_pad_mode=['logical_or', 127],
                **img_norm_cfg),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(
                type='Collect',
                keys=['img'],
                meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
                           'scale_factor', 'flip', 'img_norm_cfg', 'border')),
        ])
]
# 6 images per GPU; matches the "16 x 6" setting described in the README.
data = dict(
    samples_per_gpu=6,
    workers_per_gpu=3,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Single LR step at epoch 190 of 210 total epochs.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[190])
runner = dict(type='EpochBasedRunner', max_epochs=210)


================================================
FILE: configs/cityscapes/README.md
================================================
# Cityscapes Dataset

<!-- [DATASET] -->

```
@inproceedings{Cordts2016Cityscapes,
   title={The Cityscapes Dataset for Semantic Urban Scene Understanding},
   author={Cordts, Marius and Omran, Mohamed and Ramos, Sebastian and Rehfeld, Timo and Enzweiler, Markus and Benenson, Rodrigo and Franke, Uwe and Roth, Stefan and Schiele, Bernt},
   booktitle={Proc. of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
   year={2016}
}
```

## Common settings

- All baselines were trained using 8 GPUs with a batch size of 8 (1 image per GPU) using the [linear scaling rule](https://arxiv.org/abs/1706.02677) to scale the learning rate.
- All models were trained on `cityscapes_train`, and tested on `cityscapes_val`.
- 1x training schedule indicates 64 epochs, which corresponds to slightly less than the 24k iterations reported in the original schedule from the [Mask R-CNN paper](https://arxiv.org/abs/1703.06870).
- COCO pre-trained weights are used to initialize.
- A conversion [script](../../tools/dataset_converters/cityscapes.py) is provided to convert Cityscapes into COCO format. Please refer to [install.md](../../docs/1_exist_data_model.md#prepare-datasets) for details.
- `CityscapesDataset` implemented three evaluation methods. `bbox` and `segm` are standard COCO bbox/mask AP. `cityscapes` is the cityscapes dataset official evaluation, which may be slightly higher than COCO.

### Faster R-CNN

|    Backbone     |  Style  | Lr schd | Scale    | Mem (GB) | Inf time (fps) | box AP | Config | Download   |
| :-------------: | :-----: | :-----: | :---:    | :------: | :------------: | :----: | :------: | :--------: |
|    R-50-FPN     | pytorch |   1x    | 800-1024 |   5.2    |       -        |  40.3  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502-829424c0.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes_20200502_114915.log.json) |

### Mask R-CNN

|    Backbone     |  Style  | Lr schd | Scale    | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------: | :------------: | :----: | :-----: | :------: | :------: |
|    R-50-FPN     | pytorch |   1x    | 800-1024 |   5.3    |       -        |  40.9  |  36.4   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733-d2858245.pth) &#124; [log](https://download.openmmlab.com/mmdetection/v2.0/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes/mask_rcnn_r50_fpn_1x_cityscapes_20201211_133733.log.json) |


================================================
FILE: configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py
================================================
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_detection.py',
    '../_base_/default_runtime.py'
]
# Swap the COCO box head for an 8-class Cityscapes head. Weights come from the
# COCO checkpoint loaded below, so no separate `pretrained` backbone is set.
model = dict(
    pretrained=None,
    roi_head=dict(
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            num_classes=8,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            reg_class_agnostic=False,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))))
# optimizer: lr is tuned for a total batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: one decay step at epoch 7 ([7] yields higher performance
# than [6])
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[7])
runner = dict(type='EpochBasedRunner', max_epochs=8)  # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'  # noqa


================================================
FILE: configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
# Replace the COCO box/mask heads with 8-class Cityscapes heads. Weights come
# from the COCO checkpoint loaded below, so no `pretrained` backbone is set.
model = dict(
    pretrained=None,
    roi_head=dict(
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            num_classes=8,
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            reg_class_agnostic=False,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0.0, 0.0, 0.0, 0.0],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        mask_head=dict(
            type='FCNMaskHead',
            num_classes=8,
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
# optimizer: lr is tuned for a total batch size of 8
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy: one decay step at epoch 7 ([7] yields higher performance
# than [6])
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[7])
runner = dict(type='EpochBasedRunner', max_epochs=8)  # actual epoch = 8 * 8 = 64
log_config = dict(interval=100)
# For better, more stable performance initialize from COCO
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth'  # noqa


================================================
FILE: configs/cornernet/README.md
================================================
# CornerNet

## Introduction

<!-- [ALGORITHM] -->

```latex
@inproceedings{law2018cornernet,
  title={Cornernet: Detecting objects as paired keypoints},
  author={Law, Hei and Deng, Jia},
  booktitle={15th European Conference on Computer Vision, ECCV 2018},
  pages={765--781},
  year={2018},
  organization={Springer Verlag}
}
```

## Results and models

| Backbone        | Batch Size | Step/Total Epochs | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :--------: |:----------------: | :------: | :------------: | :----: | :------: | :--------: |
| HourglassNet-104 | [10 x 5](./cornernet_hourglass104_mstest_10x5_210e_coco.py) | 180/210 | 13.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720-5fefbf1c.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco/cornernet_hourglass104_mstest_10x5_210e_coco_20200824_185720.log.json) |
| HourglassNet-104 | [8 x 6](./cornernet_hourglass104_mstest_8x6_210e_coco.py) | 180/210 | 15.9 | 4.2 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618-79b44c30.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco/cornernet_hourglass104_mstest_8x6_210e_coco_20200825_150618.log.json) |
| HourglassNet-104 | [32 x 3](./cornernet_hourglass104_mstest_32x3_210e_coco.py) | 180/210 | 9.5 | 3.9 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110-1efaea91.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco/cornernet_hourglass104_mstest_32x3_210e_coco_20200819_203110.log.json) |

Note:

- TTA setting is single-scale and `flip=True`.
- Experiments with `images_per_gpu=6` are conducted on Tesla V100-SXM2-32GB, and experiments with `images_per_gpu=3` are conducted on GeForce GTX 1080 Ti.
- Here are the descriptions of each experiment setting:
  - 10 x 5: 10 GPUs with 5 images per gpu. This is the same setting as that reported in the original paper.
  - 8 x 6: 8 GPUs with 6 images per gpu. The total batch size is similar to the paper's and only needs 1 node to train.
  - 32 x 3: 32 GPUs with 3 images per gpu. The default setting for 1080TI, which needs 4 nodes to train.


================================================
FILE: configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py
================================================
_base_ = [
    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]

# model settings
model = dict(
    type='CornerNet',
    backbone=dict(
        type='HourglassNet',
        downsample_times=5,
        num_stacks=2,
        stage_channels=[256, 256, 384, 384, 384, 512],
        stage_blocks=[2, 2, 2, 2, 2, 4],
        norm_cfg=dict(type='BN', requires_grad=True)),
    # Hourglass outputs are consumed directly by the head; no FPN-style neck.
    neck=None,
    bbox_head=dict(
        type='CornerHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,
        # 1-d associative embeddings group top-left/bottom-right corners into
        # boxes, trained with the pull/push losses below.
        corner_emb_channels=1,
        loss_heatmap=dict(
            type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
        loss_embedding=dict(
            type='AssociativeEmbeddingLoss',
            pull_weight=0.10,
            push_weight=0.10),
        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        corner_topk=100,
        local_maximum_kernel=3,
        distance_threshold=0.5,
        score_thr=0.05,
        max_per_img=100,
        nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Random scale-and-crop to a fixed 511x511 training canvas.
    dict(
        type='RandomCenterCropPad',
        crop_size=(511, 511),
        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
        test_mode=False,
        test_pad_mode=None,
        **img_norm_cfg),
    dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# TTA: single scale (scale_factor=1.0) with horizontal flip; images are padded
# to a valid size (test_pad_mode=['logical_or', 127]) rather than resized.
test_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='MultiScaleFlipAug',
        scale_factor=1.0,
        flip=True,
        transforms=[
            dict(type='Resize'),
            dict(
                type='RandomCenterCropPad',
                crop_size=None,
                ratios=None,
                border=None,
                test_mode=True,
                test_pad_mode=['logical_or', 127],
                **img_norm_cfg),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(
                type='Collect',
                keys=['img'],
                meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
                           'scale_factor', 'flip', 'img_norm_cfg', 'border')),
        ])
]
# 5 images per GPU; matches the "10 x 5" (10 GPUs x 5 images) setting
# described in the README.
data = dict(
    samples_per_gpu=5,
    workers_per_gpu=3,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Single LR step at epoch 180 of 210 total epochs.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)


================================================
FILE: configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py
================================================
_base_ = [
    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]

# model settings
model = dict(
    type='CornerNet',
    backbone=dict(
        type='HourglassNet',
        downsample_times=5,
        num_stacks=2,
        stage_channels=[256, 256, 384, 384, 384, 512],
        stage_blocks=[2, 2, 2, 2, 2, 4],
        norm_cfg=dict(type='BN', requires_grad=True)),
    # Hourglass outputs are consumed directly by the head; no FPN-style neck.
    neck=None,
    bbox_head=dict(
        type='CornerHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,
        # 1-d associative embeddings group top-left/bottom-right corners into
        # boxes, trained with the pull/push losses below.
        corner_emb_channels=1,
        loss_heatmap=dict(
            type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
        loss_embedding=dict(
            type='AssociativeEmbeddingLoss',
            pull_weight=0.10,
            push_weight=0.10),
        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        corner_topk=100,
        local_maximum_kernel=3,
        distance_threshold=0.5,
        score_thr=0.05,
        max_per_img=100,
        nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Random scale-and-crop to a fixed 511x511 training canvas.
    dict(
        type='RandomCenterCropPad',
        crop_size=(511, 511),
        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
        test_mode=False,
        test_pad_mode=None,
        **img_norm_cfg),
    dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# TTA: single scale (scale_factor=1.0) with horizontal flip; images are padded
# to a valid size (test_pad_mode=['logical_or', 127]) rather than resized.
test_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='MultiScaleFlipAug',
        scale_factor=1.0,
        flip=True,
        transforms=[
            dict(type='Resize'),
            dict(
                type='RandomCenterCropPad',
                crop_size=None,
                ratios=None,
                border=None,
                test_mode=True,
                test_pad_mode=['logical_or', 127],
                **img_norm_cfg),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(
                type='Collect',
                keys=['img'],
                meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
                           'scale_factor', 'flip', 'img_norm_cfg', 'border')),
        ])
]
# 3 images per GPU; matches the "32 x 3" (32 GPUs x 3 images) setting
# described in the README.
data = dict(
    samples_per_gpu=3,
    workers_per_gpu=3,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Single LR step at epoch 180 of 210 total epochs.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)


================================================
FILE: configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py
================================================
_base_ = [
    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'
]

# model settings
model = dict(
    type='CornerNet',
    backbone=dict(
        type='HourglassNet',
        downsample_times=5,
        num_stacks=2,
        stage_channels=[256, 256, 384, 384, 384, 512],
        stage_blocks=[2, 2, 2, 2, 2, 4],
        norm_cfg=dict(type='BN', requires_grad=True)),
    # Hourglass outputs are consumed directly by the head; no FPN-style neck.
    neck=None,
    bbox_head=dict(
        type='CornerHead',
        num_classes=80,
        in_channels=256,
        num_feat_levels=2,
        # 1-d associative embeddings group top-left/bottom-right corners into
        # boxes, trained with the pull/push losses below.
        corner_emb_channels=1,
        loss_heatmap=dict(
            type='GaussianFocalLoss', alpha=2.0, gamma=4.0, loss_weight=1),
        loss_embedding=dict(
            type='AssociativeEmbeddingLoss',
            pull_weight=0.10,
            push_weight=0.10),
        loss_offset=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1)),
    # training and testing settings
    train_cfg=None,
    test_cfg=dict(
        corner_topk=100,
        local_maximum_kernel=3,
        distance_threshold=0.5,
        score_thr=0.05,
        max_per_img=100,
        nms=dict(type='soft_nms', iou_threshold=0.5, method='gaussian')))
# data settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    # Random scale-and-crop to a fixed 511x511 training canvas.
    dict(
        type='RandomCenterCropPad',
        crop_size=(511, 511),
        ratios=(0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.2, 1.3),
        test_mode=False,
        test_pad_mode=None,
        **img_norm_cfg),
    dict(type='Resize', img_scale=(511, 511), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
# TTA: single scale (scale_factor=1.0) with horizontal flip; images are padded
# to a valid size (test_pad_mode=['logical_or', 127]) rather than resized.
test_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='MultiScaleFlipAug',
        scale_factor=1.0,
        flip=True,
        transforms=[
            dict(type='Resize'),
            dict(
                type='RandomCenterCropPad',
                crop_size=None,
                ratios=None,
                border=None,
                test_mode=True,
                test_pad_mode=['logical_or', 127],
                **img_norm_cfg),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(
                type='Collect',
                keys=['img'],
                meta_keys=('filename', 'ori_shape', 'img_shape', 'pad_shape',
                           'scale_factor', 'flip', 'img_norm_cfg', 'border')),
        ])
]
# 6 images per GPU; matches the "8 x 6" (8 GPUs x 6 images) setting described
# in the README.
data = dict(
    samples_per_gpu=6,
    workers_per_gpu=3,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='Adam', lr=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
# Single LR step at epoch 180 of 210 total epochs.
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[180])
runner = dict(type='EpochBasedRunner', max_epochs=210)


================================================
FILE: configs/dcn/README.md
================================================
# Deformable Convolutional Networks

## Introduction

<!-- [ALGORITHM] -->

```latex
@inproceedings{dai2017deformable,
  title={Deformable Convolutional Networks},
  author={Dai, Jifeng and Qi, Haozhi and Xiong, Yuwen and Li, Yi and Zhang, Guodong and Hu, Han and Wei, Yichen},
  booktitle={Proceedings of the IEEE international conference on computer vision},
  year={2017}
}
```

<!-- [ALGORITHM] -->

```latex
@article{zhu2018deformable,
  title={Deformable ConvNets v2: More Deformable, Better Results},
  author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng},
  journal={arXiv preprint arXiv:1811.11168},
  year={2018}
}
```

## Results and Models

| Backbone         | Model        | Style   | Conv          | Pool   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
|:----------------:|:------------:|:-------:|:-------------:|:------:|:-------:|:--------:|:--------------:|:------:|:-------:|:------:|:--------:|
| R-50-FPN         | Faster       | pytorch | dconv(c3-c5)  | -      | 1x      | 4.0  | 17.8 | 41.3 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_212941.log.json) |
| R-50-FPN         | Faster       | pytorch | mdconv(c3-c5) | -      | 1x      | 4.1  | 17.6 | 41.4 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130_222144.log.json) |
| *R-50-FPN (dg=4) | Faster       | pytorch | mdconv(c3-c5) | -      | 1x      | 4.2  | 17.4 | 41.5 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130_222058.log.json) |
| R-50-FPN         | Faster       | pytorch | -             | dpool  | 1x      | 5.0  | 17.2 | 38.9 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307_203250.log.json) |
| R-50-FPN         | Faster       | pytorch | -             | mdpool | 1x      | 5.8  | 16.6 | 38.7 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307_203304.log.json) |
| R-101-FPN        | Faster       | pytorch | dconv(c3-c5)  | -      | 1x      | 6.0  | 12.5 | 42.7 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_230019.log.json) |
| X-101-32x4d-FPN | Faster        | pytorch | dconv(c3-c5)  | -      | 1x      | 7.3  | 10.0  | 44.5 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203_001325.log.json) |
| R-50-FPN         | Mask         | pytorch | dconv(c3-c5)  | -      | 1x      | 4.5  | 15.4 | 41.8 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203_061339.log.json) |
| R-50-FPN         | Mask         | pytorch | mdconv(c3-c5) | -      | 1x      | 4.5  | 15.1 | 41.5 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203_063443.log.json) |
| R-101-FPN        | Mask         | pytorch | dconv(c3-c5)  | -      | 1x      | 6.5  | 11.7  | 43.5 | 38.9  | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216_191601.log.json) |
| R-50-FPN         | Cascade      | pytorch | dconv(c3-c5)  | -      | 1x      | 4.5  | 14.6 | 43.8 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130_220843.log.json) |
| R-101-FPN        | Cascade      | pytorch | dconv(c3-c5)  | -      | 1x      | 6.4  | 11.0 | 45.0 |     | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203_224829.log.json) |
| R-50-FPN         | Cascade Mask | pytorch | dconv(c3-c5)  | -      | 1x      | 6.0  | 10.0  | 44.4 | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202_010309.log.json) |
| R-101-FPN        | Cascade Mask | pytorch | dconv(c3-c5)  | -      | 1x      | 8.0  | 8.6  | 45.8 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204_134006.log.json) |
| X-101-32x4d-FPN        | Cascade Mask | pytorch | dconv(c3-c5)  | -      | 1x      | 9.2 |   | 47.3 | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py) | [model](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth) &#124; [log](http://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-20200606_183737.log.json) |

**Notes:**

- `dconv` and `mdconv` denote (modulated) deformable convolution, `c3-c5` means adding dconv in resnet stage 3 to 5. `dpool` and `mdpool` denote (modulated) deformable roi pooling.
- The dcn ops are modified from [chengdazhi/Deformable-Convolution-V2-PyTorch](https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch), which should be more memory efficient and slightly faster.
- (*) For R-50-FPN (dg=4), `dg` is short for `deform_groups`. This model was trained and tested on an Amazon EC2 p3dn.24xlarge instance.
- **Memory, Train/Inf time is outdated.**


================================================
FILE: configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'
# Enable deformable convolutions (DCN) in ResNet stages c3-c5; the first
# residual stage keeps regular convolutions.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'
# Switch ResNet stages c3-c5 to deformable convolutions; c2 is unchanged.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'
# Add DCN to backbone stages c3-c5 of the ResNeXt-101 32x4d base model.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Replace the inherited RoI layer with deformable RoI pooling; _delete_=True
# drops the base config's roi_layer before merging this one in.
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            roi_layer=dict(
                _delete_=True,
                type='DeformRoIPoolPack',
                output_size=7,
                output_channels=256))))


================================================
FILE: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Modulated deformable convolutions (DCNv2) in ResNet stages c3-c5.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCNv2',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Modulated deformable convolutions (DCNv2) in stages c3-c5 with four
# deform groups instead of the usual one.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCNv2',
            deform_groups=4,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Swap the inherited RoI layer for modulated deformable RoI pooling;
# _delete_=True prevents merging with the base config's roi_layer.
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            out_channels=256,
            featmap_strides=[4, 8, 16, 32],
            roi_layer=dict(
                _delete_=True,
                type='ModulatedDeformRoIPoolPack',
                output_size=7,
                output_channels=256))))


================================================
FILE: configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# Swap the R-50 backbone for ResNeXt-101 (32 groups x 4d) and enable DCN
# in stages c3-c5.
model = dict(
    pretrained='open-mmlab://resnext101_32x4d',
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
# Deformable convolutions in ResNet stages c3-c5 only.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCN',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
================================================
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
# Modulated deformable convolutions (DCNv2) in ResNet stages c3-c5.
model = dict(
    backbone=dict(
        stage_with_dcn=(False, True, True, True),
        dcn=dict(
            type='DCNv2',
            deform_groups=1,
            fallback_on_stride=False)))


================================================
FILE: configs/deepfashion/README.md
================================================
# DeepFashion

<!-- [DATASET] -->

[MMFashion](https://github.com/open-mmlab/mmfashion) develops "fashion parsing and segmentation" module
based on the dataset
[DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing).
Its annotation follows COCO style.
To use it, you need to first download the data. Note that we only use "img_highres" in this task.
The file tree should be like this:

```sh
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── DeepFashion
│   │   ├── In-shop
│   │   ├── Anno
│   │   │   ├── segmentation
│   │   │   │   ├── DeepFashion_segmentation_train.json
│   │   │   │   ├── DeepFashion_segmentation_query.json
│   │   │   │   └── DeepFashion_segmentation_gallery.json
│   │   │   ├── list_bbox_inshop.txt
│   │   │   ├── list_description_inshop.json
│   │   │   ├── list_item_inshop.txt
│   │   │   └── list_landmarks_inshop.txt
│   │   ├── Eval
│   │   │   └── list_eval_partition.txt
│   │   ├── Img
│   │   │   ├── img
│   │   │   │   ├── XXX.jpg
│   │   │   └── img_highres
│   │   │       └── XXX.jpg

```

After that you can train the Mask RCNN R-50 model on the DeepFashion-In-shop dataset by launching training with the `mask_rcnn_r50_fpn_15e_deepfashion.py` config
or by creating your own config file.

```
@inproceedings{liuLQWTcvpr16DeepFashion,
   author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou},
   title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations},
   booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
   month = {June},
   year = {2016}
}
```

## Model Zoo

|   Backbone  |  Model type  |       Dataset       |  bbox detection Average Precision  | segmentation Average Precision |  Config |      Download (Google)      |
| :---------: | :----------: | :-----------------: | :--------------------------------: | :----------------------------: | :---------:| :-------------------------: |
|   ResNet50  |   Mask RCNN  | DeepFashion-In-shop |                0.599               |              0.584             |[config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py)|  [model](https://drive.google.com/open?id=1q6zF7J6Gb-FFgM87oIORIt6uBozaXp5r) &#124; [log](https://drive.google.com/file/d/1qTK4Dr4FFLa9fkdI6UVko408gkrfTRLP/view?usp=sharing)   |


================================================
FILE: configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py
================================================
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/deepfashion.py',
    '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py',
]
# DeepFashion-In-shop annotations define 15 categories, so both the box
# and mask heads are resized accordingly.
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=15),
        mask_head=dict(num_classes=15)))
# runtime settings: train for 15 epochs
runner = dict(type='EpochBasedRunner', max_epochs=15)


================================================
FILE: configs/deformable_detr/README.md
================================================
# Deformable DETR

## Introduction

<!-- [ALGORITHM] -->

We provide the config files for Deformable DETR: [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159).

```
@inproceedings{
zhu2021deformable,
title={Deformable DETR: Deformable Transformers for End-to-End Object Detection},
auth
Download .txt
gitextract_qwmo5r_u/

├── .gitignore
├── LICENSE
├── Makefile
├── README.md
├── README_zh-CN.md
├── configs/
│   ├── _base_/
│   │   ├── datasets/
│   │   │   ├── cityscapes_detection.py
│   │   │   ├── cityscapes_instance.py
│   │   │   ├── coco_detection.py
│   │   │   ├── coco_detection_tiny.py
│   │   │   ├── coco_instance.py
│   │   │   ├── coco_instance_semantic.py
│   │   │   ├── deepfashion.py
│   │   │   ├── lvis_v0.5_instance.py
│   │   │   ├── lvis_v1_instance.py
│   │   │   ├── voc0712.py
│   │   │   └── wider_face.py
│   │   ├── default_runtime.py
│   │   ├── models/
│   │   │   ├── cascade_mask_rcnn_r50_fpn.py
│   │   │   ├── cascade_rcnn_r50_fpn.py
│   │   │   ├── fast_rcnn_r50_fpn.py
│   │   │   ├── faster_rcnn_r50_caffe_c4.py
│   │   │   ├── faster_rcnn_r50_caffe_dc5.py
│   │   │   ├── faster_rcnn_r50_fpn.py
│   │   │   ├── mask_rcnn_r50_caffe_c4.py
│   │   │   ├── mask_rcnn_r50_fpn.py
│   │   │   ├── retinanet_r50_fpn.py
│   │   │   ├── rpn_r50_caffe_c4.py
│   │   │   ├── rpn_r50_fpn.py
│   │   │   └── ssd300.py
│   │   └── schedules/
│   │       ├── schedule_1x.py
│   │       ├── schedule_20e.py
│   │       └── schedule_2x.py
│   ├── adamixer/
│   │   ├── README.md
│   │   ├── adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r101_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r50_1x_coco.py
│   │   ├── adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py
│   │   ├── adamixer_r50_mstrain_480-800_3x_coco.py
│   │   └── adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py
│   ├── albu_example/
│   │   ├── README.md
│   │   └── mask_rcnn_r50_fpn_albu_1x_coco.py
│   ├── atss/
│   │   ├── README.md
│   │   ├── atss_r101_fpn_1x_coco.py
│   │   └── atss_r50_fpn_1x_coco.py
│   ├── autoassign/
│   │   ├── README.md
│   │   └── autoassign_r50_fpn_8x2_1x_coco.py
│   ├── carafe/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_carafe_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_carafe_1x_coco.py
│   ├── cascade_rcnn/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r101_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r101_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_20e_coco.py
│   │   ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py
│   │   ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── cascade_rcnn_x101_64x4d_fpn_20e_coco.py
│   ├── cascade_rpn/
│   │   ├── README.md
│   │   ├── crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
│   │   └── crpn_r50_caffe_fpn_1x_coco.py
│   ├── centripetalnet/
│   │   ├── README.md
│   │   └── centripetalnet_hourglass104_mstest_16x6_210e_coco.py
│   ├── cityscapes/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_1x_cityscapes.py
│   │   └── mask_rcnn_r50_fpn_1x_cityscapes.py
│   ├── cornernet/
│   │   ├── README.md
│   │   ├── cornernet_hourglass104_mstest_10x5_210e_coco.py
│   │   ├── cornernet_hourglass104_mstest_32x3_210e_coco.py
│   │   └── cornernet_hourglass104_mstest_8x6_210e_coco.py
│   ├── dcn/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_dpool_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│   ├── deepfashion/
│   │   ├── README.md
│   │   └── mask_rcnn_r50_fpn_15e_deepfashion.py
│   ├── deformable_detr/
│   │   ├── README.md
│   │   ├── deformable_detr_r50_16x2_50e_coco.py
│   │   ├── deformable_detr_refine_r50_16x2_50e_coco.py
│   │   └── deformable_detr_twostage_refine_r50_16x2_50e_coco.py
│   ├── detectors/
│   │   ├── README.md
│   │   ├── cascade_rcnn_r50_rfp_1x_coco.py
│   │   ├── cascade_rcnn_r50_sac_1x_coco.py
│   │   ├── detectors_cascade_rcnn_r50_1x_coco.py
│   │   ├── detectors_htc_r50_1x_coco.py
│   │   ├── htc_r50_rfp_1x_coco.py
│   │   └── htc_r50_sac_1x_coco.py
│   ├── detr/
│   │   ├── README.md
│   │   └── detr_r50_8x2_150e_coco.py
│   ├── double_heads/
│   │   ├── README.md
│   │   └── dh_faster_rcnn_r50_fpn_1x_coco.py
│   ├── dynamic_rcnn/
│   │   ├── README.md
│   │   └── dynamic_rcnn_r50_fpn_1x_coco.py
│   ├── empirical_attention/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py
│   │   └── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
│   ├── fast_rcnn/
│   │   ├── README.md
│   │   ├── fast_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── fast_rcnn_r101_fpn_1x_coco.py
│   │   ├── fast_rcnn_r101_fpn_2x_coco.py
│   │   ├── fast_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── fast_rcnn_r50_fpn_1x_coco.py
│   │   └── fast_rcnn_r50_fpn_2x_coco.py
│   ├── faster_rcnn/
│   │   ├── README.md
│   │   ├── faster_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── faster_rcnn_r101_fpn_2x_coco.py
│   │   ├── faster_rcnn_r50_caffe_c4_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
│   │   ├── faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py
│   │   ├── faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_2x_coco.py
│   │   ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_giou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_iou_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_ohem_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py
│   │   ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── faster_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── fcos/
│   │   ├── README.md
│   │   ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py
│   │   ├── fcos_center_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r101_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_1x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   │   ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│   │   ├── fcos_r50_torch_fpn_gn-head_4x4_1x_coco.py
│   │   └── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py
│   ├── foveabox/
│   │   ├── README.md
│   │   ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
│   │   ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
│   │   ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fovea_r101_fpn_4x4_1x_coco.py
│   │   ├── fovea_r101_fpn_4x4_2x_coco.py
│   │   ├── fovea_r50_fpn_4x4_1x_coco.py
│   │   └── fovea_r50_fpn_4x4_2x_coco.py
│   ├── fp16/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_fp16_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_fp16_1x_coco.py
│   │   └── retinanet_r50_fpn_fp16_1x_coco.py
│   ├── fpg/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│   │   ├── faster_rcnn_r50_fpg_crop640_50e_coco.py
│   │   ├── faster_rcnn_r50_fpn_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpg_crop640_50e_coco.py
│   │   ├── mask_rcnn_r50_fpn_crop640_50e_coco.py
│   │   ├── retinanet_r50_fpg-chn128_crop640_50e_coco.py
│   │   └── retinanet_r50_fpg_crop640_50e_coco.py
│   ├── free_anchor/
│   │   ├── README.md
│   │   ├── retinanet_free_anchor_r101_fpn_1x_coco.py
│   │   ├── retinanet_free_anchor_r50_fpn_1x_coco.py
│   │   └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py
│   ├── fsaf/
│   │   ├── README.md
│   │   ├── fsaf_r101_fpn_1x_coco.py
│   │   ├── fsaf_r50_fpn_1x_coco.py
│   │   └── fsaf_x101_64x4d_fpn_1x_coco.py
│   ├── gcnet/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│   │   └── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│   ├── gfl/
│   │   ├── README.md
│   │   ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
│   │   ├── gfl_r101_fpn_mstrain_2x_coco.py
│   │   ├── gfl_r50_fpn_1x_coco.py
│   │   ├── gfl_r50_fpn_mstrain_2x_coco.py
│   │   ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
│   │   └── gfl_x101_32x4d_fpn_mstrain_2x_coco.py
│   ├── ghm/
│   │   ├── README.md
│   │   ├── retinanet_ghm_r101_fpn_1x_coco.py
│   │   ├── retinanet_ghm_r50_fpn_1x_coco.py
│   │   ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py
│   │   └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py
│   ├── gn/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
│   │   └── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
│   ├── gn+ws/
│   │   ├── README.md
│   │   ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
│   │   ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
│   │   ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│   │   └── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
│   ├── grid_rcnn/
│   │   ├── README.md
│   │   ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py
│   │   ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py
│   │   └── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py
│   ├── groie/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_groie_1x_coco.py
│   │   ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_groie_1x_coco.py
│   │   └── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│   ├── guided_anchoring/
│   │   ├── README.md
│   │   ├── ga_fast_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_faster_r50_fpn_1x_coco.py
│   │   ├── ga_faster_x101_32x4d_fpn_1x_coco.py
│   │   ├── ga_faster_x101_64x4d_fpn_1x_coco.py
│   │   ├── ga_retinanet_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py
│   │   ├── ga_retinanet_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_retinanet_r50_fpn_1x_coco.py
│   │   ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py
│   │   ├── ga_rpn_r101_caffe_fpn_1x_coco.py
│   │   ├── ga_rpn_r50_caffe_fpn_1x_coco.py
│   │   ├── ga_rpn_r50_fpn_1x_coco.py
│   │   ├── ga_rpn_x101_32x4d_fpn_1x_coco.py
│   │   └── ga_rpn_x101_64x4d_fpn_1x_coco.py
│   ├── hrnet/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
│   │   ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
│   │   ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py
│   │   ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w18_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w18_2x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w32_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w32_2x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w40_1x_coco.py
│   │   ├── faster_rcnn_hrnetv2p_w40_2x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
│   │   ├── htc_hrnetv2p_w18_20e_coco.py
│   │   ├── htc_hrnetv2p_w32_20e_coco.py
│   │   ├── htc_hrnetv2p_w40_20e_coco.py
│   │   ├── htc_hrnetv2p_w40_28e_coco.py
│   │   ├── htc_x101_64x4d_fpn_16x1_28e_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w18_1x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w18_2x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w32_1x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w32_2x_coco.py
│   │   ├── mask_rcnn_hrnetv2p_w40_1x_coco.py
│   │   └── mask_rcnn_hrnetv2p_w40_2x_coco.py
│   ├── htc/
│   │   ├── README.md
│   │   ├── htc_r101_fpn_20e_coco.py
│   │   ├── htc_r50_fpn_1x_coco.py
│   │   ├── htc_r50_fpn_20e_coco.py
│   │   ├── htc_without_semantic_r50_fpn_1x_coco.py
│   │   ├── htc_x101_32x4d_fpn_16x1_20e_coco.py
│   │   ├── htc_x101_64x4d_fpn_16x1_20e_coco.py
│   │   └── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py
│   ├── instaboost/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py
│   │   ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py
│   │   ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│   │   ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py
│   │   ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py
│   │   └── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│   ├── ld/
│   │   ├── README.md
│   │   ├── ld_r101_gflv1_r101dcn_fpn_coco_2x.py
│   │   ├── ld_r18_gflv1_r101_fpn_coco_1x.py
│   │   ├── ld_r34_gflv1_r101_fpn_coco_1x.py
│   │   └── ld_r50_gflv1_r101_fpn_coco_1x.py
│   ├── legacy_1.x/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── faster_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── mask_rcnn_r50_fpn_1x_coco_v1.py
│   │   ├── retinanet_r50_caffe_fpn_1x_coco_v1.py
│   │   ├── retinanet_r50_fpn_1x_coco_v1.py
│   │   └── ssd300_coco_v1.py
│   ├── libra_rcnn/
│   │   ├── README.md
│   │   ├── libra_fast_rcnn_r50_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── libra_retinanet_r50_fpn_1x_coco.py
│   ├── lvis/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   │   ├── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│   │   └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│   ├── mask_rcnn/
│   │   ├── README.md
│   │   ├── mask_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_1x_coco.py
│   │   ├── mask_rcnn_r101_fpn_2x_coco.py
│   │   ├── mask_rcnn_r50_caffe_c4_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py
│   │   ├── mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── mask_rcnn_r50_fpn_2x_coco.py
│   │   ├── mask_rcnn_r50_fpn_poly_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
│   │   ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
│   │   ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── mask_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── ms_rcnn/
│   │   ├── README.md
│   │   ├── ms_rcnn_r101_caffe_fpn_1x_coco.py
│   │   ├── ms_rcnn_r101_caffe_fpn_2x_coco.py
│   │   ├── ms_rcnn_r50_caffe_fpn_1x_coco.py
│   │   ├── ms_rcnn_r50_caffe_fpn_2x_coco.py
│   │   ├── ms_rcnn_r50_fpn_1x_coco.py
│   │   ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py
│   │   └── ms_rcnn_x101_64x4d_fpn_2x_coco.py
│   ├── nas_fcos/
│   │   ├── README.md
│   │   ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   │   └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│   ├── nas_fpn/
│   │   ├── README.md
│   │   ├── retinanet_r50_fpn_crop640_50e_coco.py
│   │   └── retinanet_r50_nasfpn_crop640_50e_coco.py
│   ├── paa/
│   │   ├── README.md
│   │   ├── paa_r101_fpn_1x_coco.py
│   │   ├── paa_r101_fpn_2x_coco.py
│   │   ├── paa_r101_fpn_mstrain_3x_coco.py
│   │   ├── paa_r50_fpn_1.5x_coco.py
│   │   ├── paa_r50_fpn_1x_coco.py
│   │   ├── paa_r50_fpn_2x_coco.py
│   │   └── paa_r50_fpn_mstrain_3x_coco.py
│   ├── pafpn/
│   │   ├── README.md
│   │   └── faster_rcnn_r50_pafpn_1x_coco.py
│   ├── pascal_voc/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_1x_voc0712.py
│   │   ├── faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py
│   │   ├── retinanet_r50_fpn_1x_voc0712.py
│   │   ├── ssd300_voc0712.py
│   │   └── ssd512_voc0712.py
│   ├── pisa/
│   │   ├── README.md
│   │   ├── pisa_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_mask_rcnn_r50_fpn_1x_coco.py
│   │   ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_retinanet_r50_fpn_1x_coco.py
│   │   ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── pisa_ssd300_coco.py
│   │   └── pisa_ssd512_coco.py
│   ├── point_rend/
│   │   ├── README.md
│   │   ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py
│   │   └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py
│   ├── regnet/
│   │   ├── README.md
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
│   │   ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│   │   ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
│   │   ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│   │   ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
│   │   ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py
│   │   ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py
│   │   ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py
│   │   └── retinanet_regnetx-800MF_fpn_1x_coco.py
│   ├── reppoints/
│   │   ├── README.md
│   │   ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
│   │   ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_r50_fpn_1x_coco.py
│   │   ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
│   │   ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
│   │   ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│   │   └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
│   ├── res2net/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py
│   │   ├── cascade_rcnn_r2_101_fpn_20e_coco.py
│   │   ├── faster_rcnn_r2_101_fpn_2x_coco.py
│   │   ├── htc_r2_101_fpn_20e_coco.py
│   │   └── mask_rcnn_r2_101_fpn_2x_coco.py
│   ├── resnest/
│   │   ├── README.md
│   │   ├── cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   ├── cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   ├── cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│   │   ├── mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   │   └── mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│   ├── retinanet/
│   │   ├── README.md
│   │   ├── retinanet_r101_caffe_fpn_1x_coco.py
│   │   ├── retinanet_r101_fpn_1x_coco.py
│   │   ├── retinanet_r101_fpn_2x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_1x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py
│   │   ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py
│   │   ├── retinanet_r50_fpn_1x_coco.py
│   │   ├── retinanet_r50_fpn_2x_coco.py
│   │   ├── retinanet_x101_32x4d_fpn_1x_coco.py
│   │   ├── retinanet_x101_32x4d_fpn_2x_coco.py
│   │   ├── retinanet_x101_64x4d_fpn_1x_coco.py
│   │   └── retinanet_x101_64x4d_fpn_2x_coco.py
│   ├── rpn/
│   │   ├── README.md
│   │   ├── rpn_r101_caffe_fpn_1x_coco.py
│   │   ├── rpn_r101_fpn_1x_coco.py
│   │   ├── rpn_r101_fpn_2x_coco.py
│   │   ├── rpn_r50_caffe_c4_1x_coco.py
│   │   ├── rpn_r50_caffe_fpn_1x_coco.py
│   │   ├── rpn_r50_fpn_1x_coco.py
│   │   ├── rpn_r50_fpn_2x_coco.py
│   │   ├── rpn_x101_32x4d_fpn_1x_coco.py
│   │   ├── rpn_x101_32x4d_fpn_2x_coco.py
│   │   ├── rpn_x101_64x4d_fpn_1x_coco.py
│   │   └── rpn_x101_64x4d_fpn_2x_coco.py
│   ├── sabl/
│   │   ├── README.md
│   │   ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py
│   │   ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py
│   │   ├── sabl_faster_rcnn_r101_fpn_1x_coco.py
│   │   ├── sabl_faster_rcnn_r50_fpn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_1x_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
│   │   ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
│   │   ├── sabl_retinanet_r50_fpn_1x_coco.py
│   │   └── sabl_retinanet_r50_fpn_gn_1x_coco.py
│   ├── scnet/
│   │   ├── README.md
│   │   ├── scnet_r101_fpn_20e_coco.py
│   │   ├── scnet_r50_fpn_1x_coco.py
│   │   ├── scnet_r50_fpn_20e_coco.py
│   │   ├── scnet_x101_64x4d_fpn_20e_coco.py
│   │   └── scnet_x101_64x4d_fpn_8x1_20e_coco.py
│   ├── scratch/
│   │   ├── README.md
│   │   ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│   │   └── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│   ├── sparse_rcnn/
│   │   ├── README.md
│   │   ├── sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│   │   ├── sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
│   │   ├── sparse_rcnn_r50_fpn_1x_coco.py
│   │   ├── sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│   │   └── sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py
│   ├── ssd/
│   │   ├── README.md
│   │   ├── ssd300_coco.py
│   │   └── ssd512_coco.py
│   ├── tridentnet/
│   │   ├── README.md
│   │   ├── tridentnet_r50_caffe_1x_coco.py
│   │   ├── tridentnet_r50_caffe_mstrain_1x_coco.py
│   │   └── tridentnet_r50_caffe_mstrain_3x_coco.py
│   ├── vfnet/
│   │   ├── README.md
│   │   ├── vfnet_r101_fpn_1x_coco.py
│   │   ├── vfnet_r101_fpn_2x_coco.py
│   │   ├── vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r101_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r2_101_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_r50_fpn_1x_coco.py
│   │   ├── vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_r50_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   ├── vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
│   │   ├── vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│   │   └── vfnet_x101_64x4d_fpn_mstrain_2x_coco.py
│   ├── wider_face/
│   │   ├── README.md
│   │   └── ssd300_wider_face.py
│   ├── yolact/
│   │   ├── README.md
│   │   ├── yolact_r101_1x8_coco.py
│   │   ├── yolact_r50_1x8_coco.py
│   │   └── yolact_r50_8x8_coco.py
│   ├── yolo/
│   │   ├── README.md
│   │   ├── yolov3_d53_320_273e_coco.py
│   │   ├── yolov3_d53_mstrain-416_273e_coco.py
│   │   └── yolov3_d53_mstrain-608_273e_coco.py
│   └── yolof/
│       ├── README.md
│       ├── yolof_r50_c5_8x8_1x_coco.py
│       └── yolof_r50_c5_8x8_iter-1x_coco.py
├── demo/
│   ├── MMDet_Tutorial.ipynb
│   ├── create_result_gif.py
│   ├── image_demo.py
│   ├── inference_demo.ipynb
│   ├── video_demo.py
│   └── webcam_demo.py
├── demo.py
├── docker/
│   ├── Dockerfile
│   └── serve/
│       ├── Dockerfile
│       ├── config.properties
│       └── entrypoint.sh
├── docs/
│   ├── 1_exist_data_model.md
│   ├── 2_new_data_model.md
│   ├── 3_exist_data_new_model.md
│   ├── Makefile
│   ├── api.rst
│   ├── changelog.md
│   ├── compatibility.md
│   ├── conf.py
│   ├── conventions.md
│   ├── faq.md
│   ├── get_started.md
│   ├── index.rst
│   ├── make.bat
│   ├── model_zoo.md
│   ├── projects.md
│   ├── robustness_benchmarking.md
│   ├── stat.py
│   ├── tutorials/
│   │   ├── config.md
│   │   ├── customize_dataset.md
│   │   ├── customize_losses.md
│   │   ├── customize_models.md
│   │   ├── customize_runtime.md
│   │   ├── data_pipeline.md
│   │   ├── finetune.md
│   │   ├── index.rst
│   │   ├── onnx2tensorrt.md
│   │   └── pytorch2onnx.md
│   └── useful_tools.md
├── mmdet/
│   ├── __init__.py
│   ├── apis/
│   │   ├── __init__.py
│   │   ├── inference.py
│   │   ├── test.py
│   │   └── train.py
│   ├── core/
│   │   ├── __init__.py
│   │   ├── anchor/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_generator.py
│   │   │   ├── builder.py
│   │   │   ├── point_generator.py
│   │   │   └── utils.py
│   │   ├── bbox/
│   │   │   ├── __init__.py
│   │   │   ├── assigners/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── approx_max_iou_assigner.py
│   │   │   │   ├── assign_result.py
│   │   │   │   ├── atss_assigner.py
│   │   │   │   ├── base_assigner.py
│   │   │   │   ├── center_region_assigner.py
│   │   │   │   ├── grid_assigner.py
│   │   │   │   ├── hungarian_assigner.py
│   │   │   │   ├── max_iou_assigner.py
│   │   │   │   ├── point_assigner.py
│   │   │   │   ├── region_assigner.py
│   │   │   │   └── uniform_assigner.py
│   │   │   ├── builder.py
│   │   │   ├── coder/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_bbox_coder.py
│   │   │   │   ├── bucketing_bbox_coder.py
│   │   │   │   ├── delta_xywh_bbox_coder.py
│   │   │   │   ├── legacy_delta_xywh_bbox_coder.py
│   │   │   │   ├── pseudo_bbox_coder.py
│   │   │   │   ├── tblr_bbox_coder.py
│   │   │   │   └── yolo_bbox_coder.py
│   │   │   ├── demodata.py
│   │   │   ├── iou_calculators/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── iou2d_calculator.py
│   │   │   ├── match_costs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── match_cost.py
│   │   │   ├── samplers/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_sampler.py
│   │   │   │   ├── combined_sampler.py
│   │   │   │   ├── instance_balanced_pos_sampler.py
│   │   │   │   ├── iou_balanced_neg_sampler.py
│   │   │   │   ├── ohem_sampler.py
│   │   │   │   ├── pseudo_sampler.py
│   │   │   │   ├── random_sampler.py
│   │   │   │   ├── sampling_result.py
│   │   │   │   └── score_hlr_sampler.py
│   │   │   └── transforms.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_overlaps.py
│   │   │   ├── class_names.py
│   │   │   ├── eval_hooks.py
│   │   │   ├── mean_ap.py
│   │   │   └── recall.py
│   │   ├── export/
│   │   │   ├── __init__.py
│   │   │   ├── model_wrappers.py
│   │   │   ├── onnx_helper.py
│   │   │   └── pytorch2onnx.py
│   │   ├── mask/
│   │   │   ├── __init__.py
│   │   │   ├── mask_target.py
│   │   │   ├── structures.py
│   │   │   └── utils.py
│   │   ├── post_processing/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_nms.py
│   │   │   └── merge_augs.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── dist_utils.py
│   │   │   └── misc.py
│   │   └── visualization/
│   │       ├── __init__.py
│   │       └── image.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── api_wrappers/
│   │   │   ├── __init__.py
│   │   │   └── coco_api.py
│   │   ├── builder.py
│   │   ├── cityscapes.py
│   │   ├── coco.py
│   │   ├── custom.py
│   │   ├── dataset_wrappers.py
│   │   ├── deepfashion.py
│   │   ├── lvis.py
│   │   ├── pipelines/
│   │   │   ├── __init__.py
│   │   │   ├── auto_augment.py
│   │   │   ├── compose.py
│   │   │   ├── formating.py
│   │   │   ├── instaboost.py
│   │   │   ├── loading.py
│   │   │   ├── test_time_aug.py
│   │   │   └── transforms.py
│   │   ├── samplers/
│   │   │   ├── __init__.py
│   │   │   ├── distributed_sampler.py
│   │   │   └── group_sampler.py
│   │   ├── utils.py
│   │   ├── voc.py
│   │   ├── wider_face.py
│   │   └── xml_style.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── backbones/
│   │   │   ├── __init__.py
│   │   │   ├── darknet.py
│   │   │   ├── detectors_resnet.py
│   │   │   ├── detectors_resnext.py
│   │   │   ├── hourglass.py
│   │   │   ├── hrnet.py
│   │   │   ├── regnet.py
│   │   │   ├── res2net.py
│   │   │   ├── resnest.py
│   │   │   ├── resnet.py
│   │   │   ├── resnext.py
│   │   │   ├── ssd_vgg.py
│   │   │   ├── swin.py
│   │   │   └── trident_resnet.py
│   │   ├── builder.py
│   │   ├── dense_heads/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_free_head.py
│   │   │   ├── anchor_head.py
│   │   │   ├── atss_head.py
│   │   │   ├── autoassign_head.py
│   │   │   ├── base_dense_head.py
│   │   │   ├── cascade_rpn_head.py
│   │   │   ├── centripetal_head.py
│   │   │   ├── corner_head.py
│   │   │   ├── deformable_detr_head.py
│   │   │   ├── dense_test_mixins.py
│   │   │   ├── detr_head.py
│   │   │   ├── embedding_rpn_head.py
│   │   │   ├── fcos_head.py
│   │   │   ├── fovea_head.py
│   │   │   ├── free_anchor_retina_head.py
│   │   │   ├── fsaf_head.py
│   │   │   ├── ga_retina_head.py
│   │   │   ├── ga_rpn_head.py
│   │   │   ├── gfl_head.py
│   │   │   ├── guided_anchor_head.py
│   │   │   ├── ld_head.py
│   │   │   ├── nasfcos_head.py
│   │   │   ├── paa_head.py
│   │   │   ├── pisa_retinanet_head.py
│   │   │   ├── pisa_ssd_head.py
│   │   │   ├── query_generator.py
│   │   │   ├── reppoints_head.py
│   │   │   ├── retina_head.py
│   │   │   ├── retina_sepbn_head.py
│   │   │   ├── rpn_head.py
│   │   │   ├── rpn_test_mixin.py
│   │   │   ├── sabl_retina_head.py
│   │   │   ├── ssd_head.py
│   │   │   ├── vfnet_head.py
│   │   │   ├── yolact_head.py
│   │   │   ├── yolo_head.py
│   │   │   └── yolof_head.py
│   │   ├── detectors/
│   │   │   ├── __init__.py
│   │   │   ├── atss.py
│   │   │   ├── autoassign.py
│   │   │   ├── base.py
│   │   │   ├── cascade_rcnn.py
│   │   │   ├── cornernet.py
│   │   │   ├── deformable_detr.py
│   │   │   ├── detr.py
│   │   │   ├── fast_rcnn.py
│   │   │   ├── faster_rcnn.py
│   │   │   ├── fcos.py
│   │   │   ├── fovea.py
│   │   │   ├── fsaf.py
│   │   │   ├── gfl.py
│   │   │   ├── grid_rcnn.py
│   │   │   ├── htc.py
│   │   │   ├── kd_one_stage.py
│   │   │   ├── mask_rcnn.py
│   │   │   ├── mask_scoring_rcnn.py
│   │   │   ├── nasfcos.py
│   │   │   ├── paa.py
│   │   │   ├── point_rend.py
│   │   │   ├── query_based.py
│   │   │   ├── reppoints_detector.py
│   │   │   ├── retinanet.py
│   │   │   ├── rpn.py
│   │   │   ├── scnet.py
│   │   │   ├── single_stage.py
│   │   │   ├── sparse_rcnn.py
│   │   │   ├── trident_faster_rcnn.py
│   │   │   ├── two_stage.py
│   │   │   ├── vfnet.py
│   │   │   ├── yolact.py
│   │   │   ├── yolo.py
│   │   │   └── yolof.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy.py
│   │   │   ├── ae_loss.py
│   │   │   ├── balanced_l1_loss.py
│   │   │   ├── cross_entropy_loss.py
│   │   │   ├── focal_loss.py
│   │   │   ├── gaussian_focal_loss.py
│   │   │   ├── gfocal_loss.py
│   │   │   ├── ghm_loss.py
│   │   │   ├── iou_loss.py
│   │   │   ├── kd_loss.py
│   │   │   ├── mse_loss.py
│   │   │   ├── pisa_loss.py
│   │   │   ├── smooth_l1_loss.py
│   │   │   ├── utils.py
│   │   │   └── varifocal_loss.py
│   │   ├── necks/
│   │   │   ├── __init__.py
│   │   │   ├── bfp.py
│   │   │   ├── channel_mapper.py
│   │   │   ├── dilated_encoder.py
│   │   │   ├── fpg.py
│   │   │   ├── fpn.py
│   │   │   ├── fpn_carafe.py
│   │   │   ├── hrfpn.py
│   │   │   ├── identity_fpn.py
│   │   │   ├── nas_fpn.py
│   │   │   ├── nasfcos_fpn.py
│   │   │   ├── pafpn.py
│   │   │   ├── rfp.py
│   │   │   └── yolo_neck.py
│   │   ├── roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── adamixer_decoder.py
│   │   │   ├── base_roi_head.py
│   │   │   ├── bbox_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── adamixer_decoder_stage.py
│   │   │   │   ├── adaptive_mixing_operator.py
│   │   │   │   ├── bbox_head.py
│   │   │   │   ├── convfc_bbox_head.py
│   │   │   │   ├── dii_head.py
│   │   │   │   ├── double_bbox_head.py
│   │   │   │   ├── sabl_head.py
│   │   │   │   ├── sampling_3d_operator.py
│   │   │   │   └── scnet_bbox_head.py
│   │   │   ├── cascade_roi_head.py
│   │   │   ├── double_roi_head.py
│   │   │   ├── dynamic_roi_head.py
│   │   │   ├── grid_roi_head.py
│   │   │   ├── htc_roi_head.py
│   │   │   ├── mask_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── coarse_mask_head.py
│   │   │   │   ├── fcn_mask_head.py
│   │   │   │   ├── feature_relay_head.py
│   │   │   │   ├── fused_semantic_head.py
│   │   │   │   ├── global_context_head.py
│   │   │   │   ├── grid_head.py
│   │   │   │   ├── htc_mask_head.py
│   │   │   │   ├── mask_point_head.py
│   │   │   │   ├── maskiou_head.py
│   │   │   │   ├── scnet_mask_head.py
│   │   │   │   └── scnet_semantic_head.py
│   │   │   ├── mask_scoring_roi_head.py
│   │   │   ├── pisa_roi_head.py
│   │   │   ├── point_rend_roi_head.py
│   │   │   ├── roi_extractors/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_roi_extractor.py
│   │   │   │   ├── generic_roi_extractor.py
│   │   │   │   └── single_level_roi_extractor.py
│   │   │   ├── scnet_roi_head.py
│   │   │   ├── shared_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   └── res_layer.py
│   │   │   ├── sparse_roi_head.py
│   │   │   ├── standard_roi_head.py
│   │   │   ├── test_mixins.py
│   │   │   └── trident_roi_head.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── builder.py
│   │       ├── gaussian_target.py
│   │       ├── positional_encoding.py
│   │       ├── res_layer.py
│   │       └── transformer.py
│   ├── temp.txt
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── collect_env.py
│   │   ├── contextmanagers.py
│   │   ├── logger.py
│   │   ├── profiling.py
│   │   ├── util_mixins.py
│   │   └── util_random.py
│   └── version.py
├── pytest.ini
├── requirements/
│   ├── build.txt
│   ├── docs.txt
│   ├── optional.txt
│   ├── readthedocs.txt
│   ├── runtime.txt
│   └── tests.txt
├── requirements.txt
├── setup.cfg
├── setup.py
├── test_module.py
├── tests/
│   ├── test_data/
│   │   ├── test_datasets/
│   │   │   ├── test_coco_dataset.py
│   │   │   ├── test_common.py
│   │   │   ├── test_custom_dataset.py
│   │   │   ├── test_dataset_wrapper.py
│   │   │   └── test_xml_dataset.py
│   │   ├── test_pipelines/
│   │   │   ├── test_formatting.py
│   │   │   ├── test_loading.py
│   │   │   ├── test_sampler.py
│   │   │   └── test_transform/
│   │   │       ├── test_img_augment.py
│   │   │       ├── test_models_aug_test.py
│   │   │       ├── test_rotate.py
│   │   │       ├── test_shear.py
│   │   │       ├── test_transform.py
│   │   │       └── test_translate.py
│   │   └── test_utils.py
│   ├── test_metrics/
│   │   ├── test_box_overlap.py
│   │   └── test_losses.py
│   ├── test_models/
│   │   ├── test_backbones/
│   │   │   ├── __init__.py
│   │   │   ├── test_hourglass.py
│   │   │   ├── test_regnet.py
│   │   │   ├── test_renext.py
│   │   │   ├── test_res2net.py
│   │   │   ├── test_resnest.py
│   │   │   ├── test_resnet.py
│   │   │   ├── test_trident_resnet.py
│   │   │   └── utils.py
│   │   ├── test_dense_heads/
│   │   │   ├── test_anchor_head.py
│   │   │   ├── test_atss_head.py
│   │   │   ├── test_autoassign_head.py
│   │   │   ├── test_corner_head.py
│   │   │   ├── test_detr_head.py
│   │   │   ├── test_fcos_head.py
│   │   │   ├── test_fsaf_head.py
│   │   │   ├── test_ga_anchor_head.py
│   │   │   ├── test_gfl_head.py
│   │   │   ├── test_ld_head.py
│   │   │   ├── test_paa_head.py
│   │   │   ├── test_pisa_head.py
│   │   │   ├── test_sabl_retina_head.py
│   │   │   ├── test_vfnet_head.py
│   │   │   ├── test_yolact_head.py
│   │   │   └── test_yolof_head.py
│   │   ├── test_forward.py
│   │   ├── test_loss.py
│   │   ├── test_necks.py
│   │   ├── test_roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── test_bbox_head.py
│   │   │   ├── test_mask_head.py
│   │   │   ├── test_roi_extractor.py
│   │   │   ├── test_sabl_bbox_head.py
│   │   │   └── utils.py
│   │   └── test_utils/
│   │       ├── test_position_encoding.py
│   │       └── test_transformer.py
│   ├── test_onnx/
│   │   ├── __init__.py
│   │   ├── test_head.py
│   │   ├── test_neck.py
│   │   └── utils.py
│   ├── test_runtime/
│   │   ├── async_benchmark.py
│   │   ├── test_async.py
│   │   ├── test_config.py
│   │   ├── test_eval_hook.py
│   │   └── test_fp16.py
│   └── test_utils/
│       ├── test_anchor.py
│       ├── test_assigner.py
│       ├── test_coder.py
│       ├── test_masks.py
│       ├── test_misc.py
│       ├── test_version.py
│       └── test_visualization.py
├── tools/
│   ├── analysis_tools/
│   │   ├── analyze_logs.py
│   │   ├── analyze_results.py
│   │   ├── benchmark.py
│   │   ├── coco_error_analysis.py
│   │   ├── eval_metric.py
│   │   ├── get_flops.py
│   │   ├── robustness_eval.py
│   │   └── test_robustness.py
│   ├── dataset_converters/
│   │   ├── cityscapes.py
│   │   └── pascal_voc.py
│   ├── deployment/
│   │   ├── mmdet2torchserve.py
│   │   ├── mmdet_handler.py
│   │   ├── onnx2tensorrt.py
│   │   ├── pytorch2onnx.py
│   │   └── test.py
│   ├── dist_test.sh
│   ├── dist_train.sh
│   ├── misc/
│   │   ├── browse_dataset.py
│   │   └── print_config.py
│   ├── model_converters/
│   │   ├── detectron2pytorch.py
│   │   ├── publish_model.py
│   │   ├── regnet2mmdet.py
│   │   └── upgrade_model_version.py
│   ├── slurm_test.sh
│   ├── slurm_train.sh
│   ├── test.py
│   └── train.py
├── visualizations/
│   └── README.md
└── visualize_sampling_points.ipynb
Download .txt
SYMBOL INDEX (2194 symbols across 339 files)

FILE: configs/adamixer/adamixer_r50_1x_coco.py
  function __get_debug (line 1) | def __get_debug():
  function __date (line 185) | def __date():

FILE: demo/create_result_gif.py
  function parse_args (line 16) | def parse_args():
  function _generate_batch_data (line 31) | def _generate_batch_data(sampler, batch_size):
  function create_gif (line 42) | def create_gif(frames, gif_name, duration=2):
  function create_frame_by_matplotlib (line 57) | def create_frame_by_matplotlib(image_dir,
  function main (line 155) | def main():

FILE: demo/image_demo.py
  function parse_args (line 8) | def parse_args():
  function main (line 25) | def main(args):
  function async_main (line 34) | async def async_main(args):

FILE: demo/video_demo.py
  function parse_args (line 9) | def parse_args():
  function main (line 29) | def main():

FILE: demo/webcam_demo.py
  function parse_args (line 9) | def parse_args():
  function main (line 23) | def main():

FILE: docs/conf.py
  function get_version (line 27) | def get_version():
  function builder_inited_handler (line 85) | def builder_inited_handler(app):
  function setup (line 89) | def setup(app):

FILE: mmdet/__init__.py
  function digit_version (line 6) | def digit_version(version_str):

FILE: mmdet/apis/inference.py
  function init_detector (line 16) | def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=...
  class LoadImage (line 62) | class LoadImage(object):
    method __call__ (line 68) | def __call__(self, results):
  function inference_detector (line 95) | def inference_detector(model, imgs):
  function async_inference_detector (line 161) | async def async_inference_detector(model, imgs):
  function show_result_pyplot (line 218) | def show_result_pyplot(model,

FILE: mmdet/apis/test.py
  function single_gpu_test (line 16) | def single_gpu_test(model,
  function multi_gpu_test (line 69) | def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
  function collect_results_cpu (line 117) | def collect_results_cpu(result_part, size, tmpdir=None):
  function collect_results_gpu (line 160) | def collect_results_gpu(result_part, size):

FILE: mmdet/apis/train.py
  function set_random_seed (line 18) | def set_random_seed(seed, deterministic=False):
  function train_detector (line 37) | def train_detector(model,

FILE: mmdet/core/anchor/anchor_generator.py
  class AnchorGenerator (line 10) | class AnchorGenerator(object):
    method __init__ (line 58) | def __init__(self,
    method num_base_anchors (line 113) | def num_base_anchors(self):
    method num_levels (line 118) | def num_levels(self):
    method gen_base_anchors (line 122) | def gen_base_anchors(self):
    method gen_single_level_base_anchors (line 142) | def gen_single_level_base_anchors(self,
    method _meshgrid (line 187) | def _meshgrid(self, x, y, row_major=True):
    method grid_anchors (line 207) | def grid_anchors(self, featmap_sizes, device='cuda'):
    method single_level_grid_anchors (line 233) | def single_level_grid_anchors(self,
    method valid_flags (line 272) | def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
    method single_level_valid_flags (line 299) | def single_level_valid_flags(self,
    method __repr__ (line 330) | def __repr__(self):
  class SSDAnchorGenerator (line 350) | class SSDAnchorGenerator(AnchorGenerator):
    method __init__ (line 366) | def __init__(self,
    method gen_base_anchors (line 435) | def gen_base_anchors(self):
    method __repr__ (line 456) | def __repr__(self):
  class LegacyAnchorGenerator (line 474) | class LegacyAnchorGenerator(AnchorGenerator):
    method gen_single_level_base_anchors (line 522) | def gen_single_level_base_anchors(self,
  class LegacySSDAnchorGenerator (line 573) | class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
    method __init__ (line 580) | def __init__(self,
  class YOLOAnchorGenerator (line 595) | class YOLOAnchorGenerator(AnchorGenerator):
    method __init__ (line 605) | def __init__(self, strides, base_sizes):
    method num_levels (line 618) | def num_levels(self):
    method gen_base_anchors (line 622) | def gen_base_anchors(self):
    method gen_single_level_base_anchors (line 639) | def gen_single_level_base_anchors(self, base_sizes_per_level, center=N...
    method responsible_flags (line 667) | def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
    method single_level_responsible_flags (line 692) | def single_level_responsible_flags(self,

FILE: mmdet/core/anchor/builder.py
  function build_anchor_generator (line 6) | def build_anchor_generator(cfg, default_args=None):

FILE: mmdet/core/anchor/point_generator.py
  class PointGenerator (line 7) | class PointGenerator(object):
    method _meshgrid (line 9) | def _meshgrid(self, x, y, row_major=True):
    method grid_points (line 17) | def grid_points(self, featmap_size, stride=16, device='cuda'):
    method valid_flags (line 27) | def valid_flags(self, featmap_size, valid_size, device='cuda'):

FILE: mmdet/core/anchor/utils.py
  function images_to_levels (line 4) | def images_to_levels(target, num_levels):
  function anchor_inside_flags (line 20) | def anchor_inside_flags(flat_anchors,
  function calc_region (line 49) | def calc_region(bbox, ratio, featmap_size=None):

FILE: mmdet/core/bbox/assigners/approx_max_iou_assigner.py
  class ApproxMaxIoUAssigner (line 9) | class ApproxMaxIoUAssigner(MaxIoUAssigner):
    method __init__ (line 39) | def __init__(self,
    method assign (line 59) | def assign(self,

FILE: mmdet/core/bbox/assigners/assign_result.py
  class AssignResult (line 6) | class AssignResult(util_mixins.NiceRepr):
    method __init__ (line 42) | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
    method num_preds (line 51) | def num_preds(self):
    method set_extra_property (line 55) | def set_extra_property(self, key, value):
    method get_extra_property (line 60) | def get_extra_property(self, key):
    method info (line 65) | def info(self):
    method __nice__ (line 77) | def __nice__(self):
    method random (line 97) | def random(cls, **kwargs):
    method add_gt_ (line 190) | def add_gt_(self, gt_labels):

FILE: mmdet/core/bbox/assigners/atss_assigner.py
  class ATSSAssigner (line 10) | class ATSSAssigner(BaseAssigner):
    method __init__ (line 23) | def __init__(self,
    method assign (line 33) | def assign(self,

FILE: mmdet/core/bbox/assigners/base_assigner.py
  class BaseAssigner (line 4) | class BaseAssigner(metaclass=ABCMeta):
    method assign (line 8) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...

FILE: mmdet/core/bbox/assigners/center_region_assigner.py
  function scale_boxes (line 9) | def scale_boxes(bboxes, scale):
  function is_located_in (line 36) | def is_located_in(points, bboxes):
  function bboxes_area (line 54) | def bboxes_area(bboxes):
  class CenterRegionAssigner (line 71) | class CenterRegionAssigner(BaseAssigner):
    method __init__ (line 93) | def __init__(self,
    method get_gt_priorities (line 107) | def get_gt_priorities(self, gt_bboxes):
    method assign (line 125) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
    method assign_one_hot_gt_indices (line 256) | def assign_one_hot_gt_indices(self,

FILE: mmdet/core/bbox/assigners/grid_assigner.py
  class GridAssigner (line 10) | class GridAssigner(BaseAssigner):
    method __init__ (line 30) | def __init__(self,
    method assign (line 42) | def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=N...

FILE: mmdet/core/bbox/assigners/hungarian_assigner.py
  class HungarianAssigner (line 16) | class HungarianAssigner(BaseAssigner):
    method __init__ (line 44) | def __init__(self,
    method assign (line 52) | def assign(self,

FILE: mmdet/core/bbox/assigners/max_iou_assigner.py
  class MaxIoUAssigner (line 10) | class MaxIoUAssigner(BaseAssigner):
    method __init__ (line 40) | def __init__(self,
    method assign (line 60) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
    method assign_wrt_overlaps (line 127) | def assign_wrt_overlaps(self, overlaps, gt_labels=None):

FILE: mmdet/core/bbox/assigners/point_assigner.py
  class PointAssigner (line 9) | class PointAssigner(BaseAssigner):
    method __init__ (line 19) | def __init__(self, scale=4, pos_num=3):
    method assign (line 23) | def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...

FILE: mmdet/core/bbox/assigners/region_assigner.py
  function calc_region (line 9) | def calc_region(bbox, ratio, stride, featmap_size=None):
  function anchor_ctr_inside_region_flags (line 26) | def anchor_ctr_inside_region_flags(anchors, stride, region):
  class RegionAssigner (line 37) | class RegionAssigner(BaseAssigner):
    method __init__ (line 53) | def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
    method assign (line 57) | def assign(self,

FILE: mmdet/core/bbox/assigners/uniform_assigner.py
  class UniformAssigner (line 11) | class UniformAssigner(BaseAssigner):
    method __init__ (line 24) | def __init__(self,
    method assign (line 34) | def assign(self,

FILE: mmdet/core/bbox/builder.py
  function build_assigner (line 8) | def build_assigner(cfg, **default_args):
  function build_sampler (line 13) | def build_sampler(cfg, **default_args):
  function build_bbox_coder (line 18) | def build_bbox_coder(cfg, **default_args):

FILE: mmdet/core/bbox/coder/base_bbox_coder.py
  class BaseBBoxCoder (line 4) | class BaseBBoxCoder(metaclass=ABCMeta):
    method __init__ (line 7) | def __init__(self, **kwargs):
    method encode (line 11) | def encode(self, bboxes, gt_bboxes):
    method decode (line 15) | def decode(self, bboxes, bboxes_pred):

FILE: mmdet/core/bbox/coder/bucketing_bbox_coder.py
  class BucketingBBoxCoder (line 12) | class BucketingBBoxCoder(BaseBBoxCoder):
    method __init__ (line 34) | def __init__(self,
    method encode (line 49) | def encode(self, bboxes, gt_bboxes):
    method decode (line 71) | def decode(self, bboxes, pred_bboxes, max_shape=None):
  function generat_buckets (line 95) | def generat_buckets(proposals, num_buckets, scale_factor=1.0):
  function bbox2bucket (line 144) | def bbox2bucket(proposals,
  function bucket2bbox (line 268) | def bucket2bbox(proposals,

FILE: mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
  class DeltaXYWHBBoxCoder (line 10) | class DeltaXYWHBBoxCoder(BaseBBoxCoder):
    method __init__ (line 31) | def __init__(self,
    method encode (line 44) | def encode(self, bboxes, gt_bboxes):
    method decode (line 62) | def decode(self,
  function bbox2delta (line 98) | def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., ...
  function delta2bbox (line 144) | def delta2bbox(rois,

FILE: mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
  class LegacyDeltaXYWHBBoxCoder (line 10) | class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
    method __init__ (line 33) | def __init__(self,
    method encode (line 40) | def encode(self, bboxes, gt_bboxes):
    method decode (line 58) | def decode(self,
  function legacy_bbox2delta (line 84) | def legacy_bbox2delta(proposals,
  function legacy_delta2bbox (line 133) | def legacy_delta2bbox(rois,

FILE: mmdet/core/bbox/coder/pseudo_bbox_coder.py
  class PseudoBBoxCoder (line 6) | class PseudoBBoxCoder(BaseBBoxCoder):
    method __init__ (line 9) | def __init__(self, **kwargs):
    method encode (line 12) | def encode(self, bboxes, gt_bboxes):
    method decode (line 16) | def decode(self, bboxes, pred_bboxes):

FILE: mmdet/core/bbox/coder/tblr_bbox_coder.py
  class TBLRBBoxCoder (line 9) | class TBLRBBoxCoder(BaseBBoxCoder):
    method __init__ (line 25) | def __init__(self, normalizer=4.0, clip_border=True):
    method encode (line 30) | def encode(self, bboxes, gt_bboxes):
    method decode (line 49) | def decode(self, bboxes, pred_bboxes, max_shape=None):
  function bboxes2tblr (line 76) | def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
  function tblr2bboxes (line 123) | def tblr2bboxes(priors,

FILE: mmdet/core/bbox/coder/yolo_bbox_coder.py
  class YOLOBBoxCoder (line 9) | class YOLOBBoxCoder(BaseBBoxCoder):
    method __init__ (line 21) | def __init__(self, eps=1e-6):
    method encode (line 26) | def encode(self, bboxes, gt_bboxes, stride):
    method decode (line 61) | def decode(self, bboxes, pred_bboxes, stride):

FILE: mmdet/core/bbox/demodata.py
  function random_boxes (line 7) | def random_boxes(num=1, scale=1, rng=None):

FILE: mmdet/core/bbox/iou_calculators/builder.py
  function build_iou_calculator (line 6) | def build_iou_calculator(cfg, default_args=None):

FILE: mmdet/core/bbox/iou_calculators/iou2d_calculator.py
  function cast_tensor_type (line 6) | def cast_tensor_type(x, scale=1., dtype=None):
  function fp16_clamp (line 13) | def fp16_clamp(x, min=None, max=None):
  class BboxOverlaps2D (line 22) | class BboxOverlaps2D(object):
    method __init__ (line 25) | def __init__(self, scale=1., dtype=None):
    method __call__ (line 29) | def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
    method __repr__ (line 67) | def __repr__(self):
  function bbox_overlaps (line 74) | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e...

FILE: mmdet/core/bbox/match_costs/builder.py
  function build_match_cost (line 6) | def build_match_cost(cfg, default_args=None):

FILE: mmdet/core/bbox/match_costs/match_cost.py
  class BBoxL1Cost (line 9) | class BBoxL1Cost(object):
    method __init__ (line 27) | def __init__(self, weight=1., box_format='xyxy'):
    method __call__ (line 32) | def __call__(self, bbox_pred, gt_bboxes):
  class FocalLossCost (line 53) | class FocalLossCost(object):
    method __init__ (line 76) | def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12):
    method __call__ (line 82) | def __call__(self, cls_pred, gt_labels):
  class ClassificationCost (line 102) | class ClassificationCost(object):
    method __init__ (line 123) | def __init__(self, weight=1.):
    method __call__ (line 126) | def __call__(self, cls_pred, gt_labels):
  class IoUCost (line 146) | class IoUCost(object):
    method __init__ (line 164) | def __init__(self, iou_mode='giou', weight=1.):
    method __call__ (line 168) | def __call__(self, bboxes, gt_bboxes):

FILE: mmdet/core/bbox/samplers/base_sampler.py
  class BaseSampler (line 8) | class BaseSampler(metaclass=ABCMeta):
    method __init__ (line 11) | def __init__(self,
    method _sample_pos (line 25) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 30) | def _sample_neg(self, assign_result, num_expected, **kwargs):
    method sample (line 34) | def sample(self,

FILE: mmdet/core/bbox/samplers/combined_sampler.py
  class CombinedSampler (line 6) | class CombinedSampler(BaseSampler):
    method __init__ (line 9) | def __init__(self, pos_sampler, neg_sampler, **kwargs):
    method _sample_pos (line 14) | def _sample_pos(self, **kwargs):
    method _sample_neg (line 18) | def _sample_neg(self, **kwargs):

FILE: mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
  class InstanceBalancedPosSampler (line 9) | class InstanceBalancedPosSampler(RandomSampler):
    method _sample_pos (line 13) | def _sample_pos(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
  class IoUBalancedNegSampler (line 9) | class IoUBalancedNegSampler(RandomSampler):
    method __init__ (line 29) | def __init__(self,
    method sample_via_interval (line 46) | def sample_via_interval(self, max_overlaps, full_set, num_expected):
    method _sample_neg (line 88) | def _sample_neg(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/ohem_sampler.py
  class OHEMSampler (line 9) | class OHEMSampler(BaseSampler):
    method __init__ (line 15) | def __init__(self,
    method hard_mining (line 30) | def hard_mining(self, inds, num_expected, bboxes, labels, feats):
    method _sample_pos (line 51) | def _sample_pos(self,
    method _sample_neg (line 79) | def _sample_neg(self,

FILE: mmdet/core/bbox/samplers/pseudo_sampler.py
  class PseudoSampler (line 9) | class PseudoSampler(BaseSampler):
    method __init__ (line 12) | def __init__(self, **kwargs):
    method _sample_pos (line 15) | def _sample_pos(self, **kwargs):
    method _sample_neg (line 19) | def _sample_neg(self, **kwargs):
    method sample (line 23) | def sample(self, assign_result, bboxes, gt_bboxes, **kwargs):

FILE: mmdet/core/bbox/samplers/random_sampler.py
  class RandomSampler (line 8) | class RandomSampler(BaseSampler):
    method __init__ (line 20) | def __init__(self,
    method random_choice (line 31) | def random_choice(self, gallery, num):
    method _sample_pos (line 63) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 73) | def _sample_neg(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/sampling_result.py
  class SamplingResult (line 6) | class SamplingResult(util_mixins.NiceRepr):
    method __init__ (line 25) | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
    method bboxes (line 52) | def bboxes(self):
    method to (line 56) | def to(self, device):
    method __nice__ (line 71) | def __nice__(self):
    method info (line 80) | def info(self):
    method random (line 93) | def random(cls, rng=None, **kwargs):

FILE: mmdet/core/bbox/samplers/score_hlr_sampler.py
  class ScoreHLRSampler (line 11) | class ScoreHLRSampler(BaseSampler):
    method __init__ (line 32) | def __init__(self,
    method random_choice (line 62) | def random_choice(gallery, num):
    method _sample_pos (line 91) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 99) | def _sample_neg(self,
    method sample (line 215) | def sample(self,

FILE: mmdet/core/bbox/transforms.py
  function bbox_flip (line 5) | def bbox_flip(bboxes, img_shape, direction='horizontal'):
  function bbox_mapping (line 34) | def bbox_mapping(bboxes,
  function bbox_mapping_back (line 46) | def bbox_mapping_back(bboxes,
  function bbox2roi (line 58) | def bbox2roi(bbox_list):
  function roi2bbox (line 80) | def roi2bbox(rois):
  function bbox2result (line 99) | def bbox2result(bboxes, labels, num_classes):
  function distance2bbox (line 119) | def distance2bbox(points, distance, max_shape=None):
  function bbox2distance (line 165) | def bbox2distance(points, bbox, max_dis=None, eps=0.1):
  function bbox_rescale (line 189) | def bbox_rescale(bboxes, scale_factor=1.0):
  function bbox_cxcywh_to_xyxy (line 221) | def bbox_cxcywh_to_xyxy(bbox):
  function bbox_xyxy_to_cxcywh (line 235) | def bbox_xyxy_to_cxcywh(bbox):

FILE: mmdet/core/evaluation/bbox_overlaps.py
  function bbox_overlaps (line 4) | def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6):

FILE: mmdet/core/evaluation/class_names.py
  function wider_face_classes (line 4) | def wider_face_classes():
  function voc_classes (line 8) | def voc_classes():
  function imagenet_det_classes (line 16) | def imagenet_det_classes():
  function imagenet_vid_classes (line 57) | def imagenet_vid_classes():
  function coco_classes (line 67) | def coco_classes():
  function cityscapes_classes (line 85) | def cityscapes_classes():
  function get_classes (line 102) | def get_classes(dataset):

FILE: mmdet/core/evaluation/eval_hooks.py
  class EvalHook (line 14) | class EvalHook(Hook):
    method __init__ (line 49) | def __init__(self,
    method _init_rule (line 81) | def _init_rule(self, rule, key_indicator):
    method before_run (line 108) | def before_run(self, runner):
    method before_train_epoch (line 115) | def before_train_epoch(self, runner):
    method evaluation_flag (line 123) | def evaluation_flag(self, runner):
    method after_train_epoch (line 142) | def after_train_epoch(self, runner):
    method after_train_iter (line 151) | def after_train_iter(self, runner):
    method save_best_checkpoint (line 160) | def save_best_checkpoint(self, runner, key_score):
    method evaluate (line 175) | def evaluate(self, runner, results):
  class DistEvalHook (line 190) | class DistEvalHook(EvalHook):
    method __init__ (line 224) | def __init__(self,
    method _broadcast_bn_buffer (line 247) | def _broadcast_bn_buffer(self, runner):
    method after_train_epoch (line 261) | def after_train_epoch(self, runner):
    method after_train_iter (line 283) | def after_train_iter(self, runner):

FILE: mmdet/core/evaluation/mean_ap.py
  function average_precision (line 12) | def average_precision(recalls, precisions, mode='area'):
  function tpfp_imagenet (line 59) | def tpfp_imagenet(det_bboxes,
  function tpfp_default (line 153) | def tpfp_default(det_bboxes,
  function get_cls_results (line 240) | def get_cls_results(det_results, annotations, class_id):
  function eval_map (line 267) | def eval_map(det_results,
  function print_map_summary (line 404) | def print_map_summary(mean_ap,

FILE: mmdet/core/evaluation/recall.py
  function _recalls (line 10) | def _recalls(all_ious, proposal_nums, thrs):
  function set_recall_param (line 43) | def set_recall_param(proposal_nums, iou_thrs):
  function eval_recalls (line 64) | def eval_recalls(gts,
  function print_recall_summary (line 109) | def print_recall_summary(recalls,
  function plot_num_recall (line 142) | def plot_num_recall(recalls, proposal_nums):
  function plot_iou_recall (line 167) | def plot_iou_recall(recalls, iou_thrs):

FILE: mmdet/core/export/model_wrappers.py
  class ONNXRuntimeDetector (line 12) | class ONNXRuntimeDetector(BaseDetector):
    method __init__ (line 15) | def __init__(self, onnx_file, class_names, device_id):
    method simple_test (line 46) | def simple_test(self, img, img_metas, **kwargs):
    method aug_test (line 49) | def aug_test(self, imgs, img_metas, **kwargs):
    method extract_feat (line 52) | def extract_feat(self, imgs):
    method forward_test (line 55) | def forward_test(self, imgs, img_metas, **kwargs):

FILE: mmdet/core/export/onnx_helper.py
  function dynamic_clip_for_onnx (line 6) | def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):
  function get_k_for_topk (line 45) | def get_k_for_topk(k, size):
  function add_dummy_nms_for_onnx (line 81) | def add_dummy_nms_for_onnx(boxes,
  class DymmyONNXNMSop (line 200) | class DymmyONNXNMSop(torch.autograd.Function):
    method forward (line 207) | def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_thresh...
    method symbolic (line 213) | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold,

FILE: mmdet/core/export/pytorch2onnx.py
  function generate_inputs_and_wrap_model (line 9) | def generate_inputs_and_wrap_model(config_path,
  function build_model_from_cfg (line 64) | def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
  function preprocess_example_input (line 98) | def preprocess_example_input(input_config):

FILE: mmdet/core/mask/mask_target.py
  function mask_target (line 6) | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_...
  function mask_target_single (line 66) | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):

FILE: mmdet/core/mask/structures.py
  class BaseInstanceMasks (line 11) | class BaseInstanceMasks(metaclass=ABCMeta):
    method rescale (line 15) | def rescale(self, scale, interpolation='nearest'):
    method resize (line 28) | def resize(self, out_shape, interpolation='nearest'):
    method flip (line 40) | def flip(self, flip_direction='horizontal'):
    method pad (line 51) | def pad(self, out_shape, pad_val):
    method crop (line 63) | def crop(self, bbox):
    method crop_and_resize (line 74) | def crop_and_resize(self,
    method expand (line 99) | def expand(self, expanded_h, expanded_w, top, left):
    method areas (line 104) | def areas(self):
    method to_ndarray (line 108) | def to_ndarray(self):
    method to_tensor (line 116) | def to_tensor(self, dtype, device):
    method translate (line 128) | def translate(self,
    method shear (line 148) | def shear(self,
    method rotate (line 170) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
  class BitmapMasks (line 188) | class BitmapMasks(BaseInstanceMasks):
    method __init__ (line 217) | def __init__(self, masks, height, width):
    method __getitem__ (line 234) | def __getitem__(self, index):
    method __iter__ (line 246) | def __iter__(self):
    method __repr__ (line 249) | def __repr__(self):
    method __len__ (line 256) | def __len__(self):
    method rescale (line 260) | def rescale(self, scale, interpolation='nearest'):
    method resize (line 273) | def resize(self, out_shape, interpolation='nearest'):
    method flip (line 285) | def flip(self, flip_direction='horizontal'):
    method pad (line 298) | def pad(self, out_shape, pad_val=0):
    method crop (line 309) | def crop(self, bbox):
    method crop_and_resize (line 328) | def crop_and_resize(self,
    method expand (line 360) | def expand(self, expanded_h, expanded_w, top, left):
    method translate (line 372) | def translate(self,
    method shear (line 422) | def shear(self,
    method rotate (line 457) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
    method areas (line 490) | def areas(self):
    method to_ndarray (line 494) | def to_ndarray(self):
    method to_tensor (line 498) | def to_tensor(self, dtype, device):
    method random (line 503) | def random(cls,
  class PolygonMasks (line 524) | class PolygonMasks(BaseInstanceMasks):
    method __init__ (line 564) | def __init__(self, masks, height, width):
    method __getitem__ (line 574) | def __getitem__(self, index):
    method __iter__ (line 597) | def __iter__(self):
    method __repr__ (line 600) | def __repr__(self):
    method __len__ (line 607) | def __len__(self):
    method rescale (line 611) | def rescale(self, scale, interpolation=None):
    method resize (line 620) | def resize(self, out_shape, interpolation=None):
    method flip (line 639) | def flip(self, flip_direction='horizontal'):
    method crop (line 663) | def crop(self, bbox):
    method pad (line 692) | def pad(self, out_shape, pad_val=0):
    method expand (line 696) | def expand(self, *args, **kwargs):
    method crop_and_resize (line 700) | def crop_and_resize(self,
    method translate (line 736) | def translate(self,
    method shear (line 770) | def shear(self,
    method rotate (line 802) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
    method to_bitmap (line 831) | def to_bitmap(self):
    method areas (line 837) | def areas(self):
    method _polygon_area (line 855) | def _polygon_area(self, x, y):
    method to_ndarray (line 871) | def to_ndarray(self):
    method to_tensor (line 881) | def to_tensor(self, dtype, device):
    method random (line 891) | def random(cls,
  function polygon_to_bitmap (line 1010) | def polygon_to_bitmap(polygons, height, width):

FILE: mmdet/core/mask/utils.py
  function split_combined_polys (line 6) | def split_combined_polys(polys, poly_lens, polys_per_mask):
  function encode_mask_results (line 36) | def encode_mask_results(mask_results):

FILE: mmdet/core/post_processing/bbox_nms.py
  function multiclass_nms (line 7) | def multiclass_nms(multi_bboxes,
  function fast_nms (line 96) | def fast_nms(multi_bboxes,

FILE: mmdet/core/post_processing/merge_augs.py
  function merge_aug_proposals (line 12) | def merge_aug_proposals(aug_proposals, img_metas, cfg):
  function merge_aug_bboxes (line 83) | def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
  function merge_aug_scores (line 112) | def merge_aug_scores(aug_scores):
  function merge_aug_masks (line 120) | def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):

FILE: mmdet/core/utils/dist_utils.py
  function _allreduce_coalesced (line 10) | def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
  function allreduce_grads (line 32) | def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
  class DistOptimizerHook (line 54) | class DistOptimizerHook(OptimizerHook):
    method __init__ (line 57) | def __init__(self, *args, **kwargs):
  function reduce_mean (line 63) | def reduce_mean(tensor):

FILE: mmdet/core/utils/misc.py
  function multi_apply (line 10) | def multi_apply(func, *args, **kwargs):
  function unmap (line 32) | def unmap(data, count, inds, fill=0):
  function mask2ndarray (line 45) | def mask2ndarray(mask):

FILE: mmdet/core/visualization/image.py
  function color_val_matplotlib (line 13) | def color_val_matplotlib(color):
  function imshow_det_bboxes (line 28) | def imshow_det_bboxes(img,
  function imshow_gt_det_bboxes (line 198) | def imshow_gt_det_bboxes(img,

FILE: mmdet/datasets/api_wrappers/coco_api.py
  class COCO (line 10) | class COCO(_COCO):
    method __init__ (line 17) | def __init__(self, annotation_file=None):
    method get_ann_ids (line 26) | def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
    method get_cat_ids (line 29) | def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
    method get_img_ids (line 32) | def get_img_ids(self, img_ids=[], cat_ids=[]):
    method load_anns (line 35) | def load_anns(self, ids):
    method load_cats (line 38) | def load_cats(self, ids):
    method load_imgs (line 41) | def load_imgs(self, ids):

FILE: mmdet/datasets/builder.py
  function _concat_dataset (line 26) | def _concat_dataset(cfg, default_args=None):
  function build_dataset (line 53) | def build_dataset(cfg, default_args=None):
  function build_dataloader (line 76) | def build_dataloader(dataset,
  function worker_init_fn (line 138) | def worker_init_fn(worker_id, num_workers, rank, seed):

FILE: mmdet/datasets/cityscapes.py
  class CityscapesDataset (line 20) | class CityscapesDataset(CocoDataset):
    method _filter_imgs (line 25) | def _filter_imgs(self, min_size=32):
    method _parse_ann_info (line 53) | def _parse_ann_info(self, img_info, ann_info):
    method results2txt (line 107) | def results2txt(self, results, outfile_prefix):
    method format_results (line 173) | def format_results(self, results, txtfile_prefix=None):
    method evaluate (line 207) | def evaluate(self,
    method _evaluate_cityscapes (line 274) | def _evaluate_cityscapes(self, results, txtfile_prefix, logger):

FILE: mmdet/datasets/coco.py
  class CocoDataset (line 20) | class CocoDataset(CustomDataset):
    method load_annotations (line 37) | def load_annotations(self, ann_file):
    method get_ann_info (line 63) | def get_ann_info(self, idx):
    method get_cat_ids (line 78) | def get_cat_ids(self, idx):
    method _filter_imgs (line 93) | def _filter_imgs(self, min_size=32):
    method _parse_ann_info (line 117) | def _parse_ann_info(self, img_info, ann_info):
    method xyxy2xywh (line 176) | def xyxy2xywh(self, bbox):
    method _proposal2json (line 196) | def _proposal2json(self, results):
    method _det2json (line 211) | def _det2json(self, results):
    method _segm2json (line 228) | def _segm2json(self, results):
    method results2json (line 266) | def results2json(self, results, outfile_prefix):
    method fast_eval_recall (line 306) | def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=No...
    method format_results (line 330) | def format_results(self, results, jsonfile_prefix=None, **kwargs):
    method evaluate (line 358) | def evaluate(self,

FILE: mmdet/datasets/custom.py
  class CustomDataset (line 17) | class CustomDataset(Dataset):
    method __init__ (line 56) | def __init__(self,
    method __len__ (line 107) | def __len__(self):
    method load_annotations (line 111) | def load_annotations(self, ann_file):
    method load_proposals (line 115) | def load_proposals(self, proposal_file):
    method get_ann_info (line 119) | def get_ann_info(self, idx):
    method get_cat_ids (line 131) | def get_cat_ids(self, idx):
    method pre_pipeline (line 143) | def pre_pipeline(self, results):
    method _filter_imgs (line 152) | def _filter_imgs(self, min_size=32):
    method _set_group_flag (line 163) | def _set_group_flag(self):
    method _rand_another (line 175) | def _rand_another(self, idx):
    method __getitem__ (line 180) | def __getitem__(self, idx):
    method prepare_train_img (line 200) | def prepare_train_img(self, idx):
    method prepare_test_img (line 219) | def prepare_test_img(self, idx):
    method get_classes (line 238) | def get_classes(cls, classes=None):
    method format_results (line 264) | def format_results(self, results, **kwargs):
    method evaluate (line 267) | def evaluate(self,
    method __repr__ (line 326) | def __repr__(self):

FILE: mmdet/datasets/dataset_wrappers.py
  class ConcatDataset (line 14) | class ConcatDataset(_ConcatDataset):
    method __init__ (line 27) | def __init__(self, datasets, separate_eval=True):
    method get_cat_ids (line 46) | def get_cat_ids(self, idx):
    method evaluate (line 68) | def evaluate(self, results, logger=None, **kwargs):
  class RepeatDataset (line 128) | class RepeatDataset(object):
    method __init__ (line 141) | def __init__(self, dataset, times):
    method __getitem__ (line 150) | def __getitem__(self, idx):
    method get_cat_ids (line 153) | def get_cat_ids(self, idx):
    method __len__ (line 165) | def __len__(self):
  class ClassBalancedDataset (line 172) | class ClassBalancedDataset(object):
    method __init__ (line 208) | def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
    method _get_repeat_factors (line 227) | def _get_repeat_factors(self, dataset, repeat_thr):
    method __getitem__ (line 276) | def __getitem__(self, idx):
    method __len__ (line 280) | def __len__(self):

FILE: mmdet/datasets/deepfashion.py
  class DeepFashionDataset (line 6) | class DeepFashionDataset(CocoDataset):

FILE: mmdet/datasets/lvis.py
  class LVISV05Dataset (line 17) | class LVISV05Dataset(CocoDataset):
    method load_annotations (line 269) | def load_annotations(self, ann_file):
    method evaluate (line 308) | def evaluate(self,
  class LVISV1Dataset (line 470) | class LVISV1Dataset(LVISDataset):
    method load_annotations (line 712) | def load_annotations(self, ann_file):

FILE: mmdet/datasets/pipelines/auto_augment.py
  function level_to_value (line 13) | def level_to_value(level, max_value):
  function enhance_level_to_value (line 18) | def enhance_level_to_value(level, a=1.8, b=0.1):
  function random_negative (line 23) | def random_negative(value, random_negative_prob):
  function bbox2fields (line 28) | def bbox2fields():
  class AutoAugment (line 46) | class AutoAugment(object):
    method __init__ (line 89) | def __init__(self, policies):
    method __call__ (line 103) | def __call__(self, results):
    method __repr__ (line 107) | def __repr__(self):
  class Shear (line 112) | class Shear(object):
    method __init__ (line 135) | def __init__(self,
    method _shear_img (line 179) | def _shear_img(self,
    method _shear_bboxes (line 203) | def _shear_bboxes(self, results, magnitude):
    method _shear_masks (line 233) | def _shear_masks(self,
    method _shear_seg (line 249) | def _shear_seg(self,
    method _filter_invalid (line 265) | def _filter_invalid(self, results, min_bbox_size=0):
    method __call__ (line 284) | def __call__(self, results):
    method __repr__ (line 315) | def __repr__(self):
  class Rotate (line 329) | class Rotate(object):
    method __init__ (line 355) | def __init__(self,
    method _rotate_img (line 406) | def _rotate_img(self, results, angle, center=None, scale=1.0):
    method _rotate_bboxes (line 424) | def _rotate_bboxes(self, results, rotate_matrix):
    method _rotate_masks (line 459) | def _rotate_masks(self,
    method _rotate_seg (line 471) | def _rotate_seg(self,
    method _filter_invalid (line 484) | def _filter_invalid(self, results, min_bbox_size=0):
    method __call__ (line 503) | def __call__(self, results):
    method __repr__ (line 529) | def __repr__(self):
  class Translate (line 543) | class Translate(object):
    method __init__ (line 569) | def __init__(self,
    method _translate_img (line 610) | def _translate_img(self, results, offset, direction='horizontal'):
    method _translate_bboxes (line 624) | def _translate_bboxes(self, results, offset):
    method _translate_masks (line 642) | def _translate_masks(self,
    method _translate_seg (line 653) | def _translate_seg(self,
    method _filter_invalid (line 664) | def _filter_invalid(self, results, min_size=0):
    method __call__ (line 683) | def __call__(self, results):
  class ColorTransform (line 709) | class ColorTransform(object):
    method __init__ (line 718) | def __init__(self, level, prob=0.5):
    method _adjust_color_img (line 729) | def _adjust_color_img(self, results, factor=1.0):
    method __call__ (line 736) | def __call__(self, results):
    method __repr__ (line 750) | def __repr__(self):
  class EqualizeTransform (line 758) | class EqualizeTransform(object):
    method __init__ (line 766) | def __init__(self, prob=0.5):
    method _imequalize (line 771) | def _imequalize(self, results):
    method __call__ (line 777) | def __call__(self, results):
    method __repr__ (line 791) | def __repr__(self):
  class BrightnessTransform (line 797) | class BrightnessTransform(object):
    method __init__ (line 806) | def __init__(self, level, prob=0.5):
    method _adjust_brightness_img (line 817) | def _adjust_brightness_img(self, results, factor=1.0):
    method __call__ (line 824) | def __call__(self, results):
    method __repr__ (line 838) | def __repr__(self):
  class ContrastTransform (line 846) | class ContrastTransform(object):
    method __init__ (line 855) | def __init__(self, level, prob=0.5):
    method _adjust_contrast_img (line 866) | def _adjust_contrast_img(self, results, factor=1.0):
    method __call__ (line 872) | def __call__(self, results):
    method __repr__ (line 886) | def __repr__(self):

FILE: mmdet/datasets/pipelines/compose.py
  class Compose (line 9) | class Compose(object):
    method __init__ (line 17) | def __init__(self, transforms):
    method __call__ (line 29) | def __call__(self, data):
    method __repr__ (line 45) | def __repr__(self):

FILE: mmdet/datasets/pipelines/formating.py
  function to_tensor (line 11) | def to_tensor(data):
  class ToTensor (line 37) | class ToTensor(object):
    method __init__ (line 44) | def __init__(self, keys):
    method __call__ (line 47) | def __call__(self, results):
    method __repr__ (line 61) | def __repr__(self):
  class ImageToTensor (line 66) | class ImageToTensor(object):
    method __init__ (line 77) | def __init__(self, keys):
    method __call__ (line 80) | def __call__(self, results):
    method __repr__ (line 98) | def __repr__(self):
  class Transpose (line 103) | class Transpose(object):
    method __init__ (line 111) | def __init__(self, keys, order):
    method __call__ (line 115) | def __call__(self, results):
    method __repr__ (line 129) | def __repr__(self):
  class ToDataContainer (line 135) | class ToDataContainer(object):
    method __init__ (line 146) | def __init__(self,
    method __call__ (line 151) | def __call__(self, results):
    method __repr__ (line 169) | def __repr__(self):
  class DefaultFormatBundle (line 174) | class DefaultFormatBundle(object):
    method __call__ (line 191) | def __call__(self, results):
    method _add_default_meta_keys (line 221) | def _add_default_meta_keys(self, results):
    method __repr__ (line 246) | def __repr__(self):
  class Collect (line 251) | class Collect(object):
    method __init__ (line 290) | def __init__(self,
    method __call__ (line 298) | def __call__(self, results):
    method __repr__ (line 321) | def __repr__(self):
  class WrapFieldsToLists (line 327) | class WrapFieldsToLists(object):
    method __call__ (line 347) | def __call__(self, results):
    method __repr__ (line 363) | def __repr__(self):

FILE: mmdet/datasets/pipelines/instaboost.py
  class InstaBoost (line 7) | class InstaBoost(object):
    method __init__ (line 15) | def __init__(self,
    method _load_anns (line 36) | def _load_anns(self, results):
    method _parse_anns (line 58) | def _parse_anns(self, results, anns, img):
    method __call__ (line 79) | def __call__(self, results):
    method __repr__ (line 95) | def __repr__(self):

FILE: mmdet/datasets/pipelines/loading.py
  class LoadImageFromFile (line 12) | class LoadImageFromFile(object):
    method __init__ (line 31) | def __init__(self,
    method __call__ (line 40) | def __call__(self, results):
    method __repr__ (line 72) | def __repr__(self):
  class LoadImageFromWebcam (line 81) | class LoadImageFromWebcam(LoadImageFromFile):
    method __call__ (line 88) | def __call__(self, results):
  class LoadMultiChannelImageFromFiles (line 113) | class LoadMultiChannelImageFromFiles(object):
    method __init__ (line 133) | def __init__(self,
    method __call__ (line 142) | def __call__(self, results):
    method __repr__ (line 187) | def __repr__(self):
  class LoadAnnotations (line 196) | class LoadAnnotations(object):
    method __init__ (line 215) | def __init__(self,
    method _load_bboxes (line 230) | def _load_bboxes(self, results):
    method _load_labels (line 250) | def _load_labels(self, results):
    method _poly2mask (line 263) | def _poly2mask(self, mask_ann, img_h, img_w):
    method process_polygons (line 290) | def process_polygons(self, polygons):
    method _load_masks (line 307) | def _load_masks(self, results):
    method _load_semantic_seg (line 332) | def _load_semantic_seg(self, results):
    method __call__ (line 353) | def __call__(self, results):
    method __repr__ (line 376) | def __repr__(self):
  class LoadProposals (line 388) | class LoadProposals(object):
    method __init__ (line 398) | def __init__(self, num_max_proposals=None):
    method __call__ (line 401) | def __call__(self, results):
    method __repr__ (line 427) | def __repr__(self):
  class FilterAnnotations (line 433) | class FilterAnnotations(object):
    method __init__ (line 441) | def __init__(self, min_gt_bbox_wh):
    method __call__ (line 445) | def __call__(self, results):

FILE: mmdet/datasets/pipelines/test_time_aug.py
  class MultiScaleFlipAug (line 10) | class MultiScaleFlipAug(object):
    method __init__ (line 52) | def __init__(self,
    method __call__ (line 83) | def __call__(self, results):
    method __repr__ (line 114) | def __repr__(self):

FILE: mmdet/datasets/pipelines/transforms.py
  class Resize (line 26) | class Resize(object):
    method __init__ (line 66) | def __init__(self,
    method random_select (line 99) | def random_select(img_scales):
    method random_sample (line 117) | def random_sample(img_scales):
    method random_sample_ratio (line 144) | def random_sample_ratio(img_scale, ratio_range):
    method _random_scale (line 170) | def _random_scale(self, results):
    method _resize_img (line 203) | def _resize_img(self, results):
    method _resize_bboxes (line 234) | def _resize_bboxes(self, results):
    method _resize_masks (line 244) | def _resize_masks(self, results):
    method _resize_seg (line 254) | def _resize_seg(self, results):
    method __call__ (line 271) | def __call__(self, results):
    method __repr__ (line 308) | def __repr__(self):
  class RandomFlip (line 319) | class RandomFlip(object):
    method __init__ (line 356) | def __init__(self, flip_ratio=None, direction='horizontal'):
    method bbox_flip (line 382) | def bbox_flip(self, bboxes, img_shape, direction):
    method __call__ (line 416) | def __call__(self, results):
    method __repr__ (line 471) | def __repr__(self):
  class RandomShift (line 476) | class RandomShift(object):
    method __init__ (line 487) | def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1):
    method __call__ (line 499) | def __call__(self, results):
    method __repr__ (line 559) | def __repr__(self):
  class Pad (line 566) | class Pad(object):
    method __init__ (line 579) | def __init__(self, size=None, size_divisor=None, pad_val=0):
    method _pad_img (line 587) | def _pad_img(self, results):
    method _pad_masks (line 601) | def _pad_masks(self, results):
    method _pad_seg (line 607) | def _pad_seg(self, results):
    method __call__ (line 614) | def __call__(self, results):
    method __repr__ (line 628) | def __repr__(self):
  class Normalize (line 637) | class Normalize(object):
    method __init__ (line 649) | def __init__(self, mean, std, to_rgb=True):
    method __call__ (line 654) | def __call__(self, results):
    method __repr__ (line 671) | def __repr__(self):
  class RandomCrop (line 678) | class RandomCrop(object):
    method __init__ (line 712) | def __init__(self,
    method _crop_data (line 741) | def _crop_data(self, results, crop_size, allow_negative_crop):
    method _get_crop_size (line 806) | def _get_crop_size(self, image_size):
    method __call__ (line 836) | def __call__(self, results):
    method __repr__ (line 852) | def __repr__(self):
  class SegRescale (line 862) | class SegRescale(object):
    method __init__ (line 872) | def __init__(self, scale_factor=1, backend='cv2'):
    method __call__ (line 876) | def __call__(self, results):
    method __repr__ (line 895) | def __repr__(self):
  class PhotoMetricDistortion (line 900) | class PhotoMetricDistortion(object):
    method __init__ (line 921) | def __init__(self,
    method __call__ (line 931) | def __call__(self, results):
    method __repr__ (line 994) | def __repr__(self):
  class Expand (line 1006) | class Expand(object):
    method __init__ (line 1019) | def __init__(self,
    method __call__ (line 1035) | def __call__(self, results):
    method __repr__ (line 1089) | def __repr__(self):
  class MinIoURandomCrop (line 1098) | class MinIoURandomCrop(object):
    method __init__ (line 1117) | def __init__(self,
    method __call__ (line 1135) | def __call__(self, results):
    method __repr__ (line 1229) | def __repr__(self):
  class Corrupt (line 1238) | class Corrupt(object):
    method __init__ (line 1249) | def __init__(self, corruption, severity=1):
    method __call__ (line 1253) | def __call__(self, results):
    method __repr__ (line 1274) | def __repr__(self):
  class Albu (line 1282) | class Albu(object):
    method __init__ (line 1324) | def __init__(self,
    method albu_builder (line 1367) | def albu_builder(self, cfg):
    method mapper (line 1402) | def mapper(d, keymap):
    method __call__ (line 1418) | def __call__(self, results):
    method __repr__ (line 1480) | def __repr__(self):
  class RandomCenterCropPad (line 1486) | class RandomCenterCropPad(object):
    method __init__ (line 1573) | def __init__(self,
    method _get_border (line 1617) | def _get_border(self, border, size):
    method _filter_boxes (line 1636) | def _filter_boxes(self, patch, boxes):
    method _crop_image_and_paste (line 1652) | def _crop_image_and_paste(self, image, center, size):
    method _train_aug (line 1703) | def _train_aug(self, results):
    method _test_aug (line 1771) | def _test_aug(self, results):
    method __call__ (line 1804) | def __call__(self, results):
    method __repr__ (line 1816) | def __repr__(self):
  class CutOut (line 1831) | class CutOut(object):
    method __init__ (line 1854) | def __init__(self,
    method __call__ (line 1875) | def __call__(self, results):
    method __repr__ (line 1895) | def __repr__(self):

FILE: mmdet/datasets/samplers/distributed_sampler.py
  class DistributedSampler (line 7) | class DistributedSampler(_DistributedSampler):
    method __init__ (line 9) | def __init__(self,
    method __iter__ (line 20) | def __iter__(self):

FILE: mmdet/datasets/samplers/group_sampler.py
  class GroupSampler (line 10) | class GroupSampler(Sampler):
    method __init__ (line 12) | def __init__(self, dataset, samples_per_gpu=1):
    method __iter__ (line 23) | def __iter__(self):
    method __len__ (line 47) | def __len__(self):
  class DistributedGroupSampler (line 51) | class DistributedGroupSampler(Sampler):
    method __init__ (line 72) | def __init__(self,
    method __iter__ (line 101) | def __iter__(self):
    method __len__ (line 144) | def __len__(self):
    method set_epoch (line 147) | def set_epoch(self, epoch):

FILE: mmdet/datasets/utils.py
  function replace_ImageToTensor (line 13) | def replace_ImageToTensor(pipelines):
  function get_loading_pipeline (line 73) | def get_loading_pipeline(pipeline):
  class NumClassCheckHook (line 115) | class NumClassCheckHook(Hook):
    method _check_head (line 117) | def _check_head(self, runner):
    method before_train_epoch (line 149) | def before_train_epoch(self, runner):
    method before_val_epoch (line 157) | def before_val_epoch(self, runner):

FILE: mmdet/datasets/voc.py
  class VOCDataset (line 11) | class VOCDataset(XMLDataset):
    method __init__ (line 18) | def __init__(self, **kwargs):
    method evaluate (line 27) | def evaluate(self,

FILE: mmdet/datasets/wider_face.py
  class WIDERFaceDataset (line 11) | class WIDERFaceDataset(XMLDataset):
    method __init__ (line 19) | def __init__(self, **kwargs):
    method load_annotations (line 22) | def load_annotations(self, ann_file):

FILE: mmdet/datasets/xml_style.py
  class XMLDataset (line 13) | class XMLDataset(CustomDataset):
    method __init__ (line 22) | def __init__(self, min_size=None, **kwargs):
    method load_annotations (line 29) | def load_annotations(self, ann_file):
    method _filter_imgs (line 61) | def _filter_imgs(self, min_size=32):
    method get_ann_info (line 82) | def get_ann_info(self, idx):
    method get_cat_ids (line 148) | def get_cat_ids(self, idx):

FILE: mmdet/models/backbones/darknet.py
  class ResBlock (line 13) | class ResBlock(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method forward (line 49) | def forward(self, x):
  class Darknet (line 59) | class Darknet(BaseModule):
    method __init__ (line 100) | def __init__(self,
    method forward (line 152) | def forward(self, x):
    method _freeze_stages (line 162) | def _freeze_stages(self):
    method train (line 170) | def train(self, mode=True):
    method make_conv_res_block (line 179) | def make_conv_res_block(in_channels,

FILE: mmdet/models/backbones/detectors_resnet.py
  class Bottleneck (line 15) | class Bottleneck(_Bottleneck):
    method __init__ (line 35) | def __init__(self,
    method rfp_forward (line 72) | def rfp_forward(self, x, rfp_feat):
  class ResLayer (line 119) | class ResLayer(Sequential):
    method __init__ (line 145) | def __init__(self,
  class DetectoRS_ResNet (line 211) | class DetectoRS_ResNet(ResNet):
    method __init__ (line 233) | def __init__(self,
    method init_weights (line 286) | def init_weights(self):
    method make_res_layer (line 315) | def make_res_layer(self, **kwargs):
    method forward (line 319) | def forward(self, x):
    method rfp_forward (line 326) | def rfp_forward(self, x, rfp_feats):

FILE: mmdet/models/backbones/detectors_resnext.py
  class Bottleneck (line 10) | class Bottleneck(_Bottleneck):
    method __init__ (line 13) | def __init__(self,
  class DetectoRS_ResNeXt (line 98) | class DetectoRS_ResNeXt(DetectoRS_ResNet):
    method __init__ (line 112) | def __init__(self, groups=1, base_width=4, **kwargs):
    method make_res_layer (line 117) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/hourglass.py
  class HourglassModule (line 10) | class HourglassModule(BaseModule):
    method __init__ (line 26) | def __init__(self,
    method forward (line 74) | def forward(self, x):
  class HourglassNet (line 85) | class HourglassNet(BaseModule):
    method __init__ (line 119) | def __init__(self,
    method init_weights (line 175) | def init_weights(self):
    method forward (line 183) | def forward(self, x):

FILE: mmdet/models/backbones/hrnet.py
  class HRModule (line 12) | class HRModule(BaseModule):
    method __init__ (line 19) | def __init__(self,
    method _check_branches (line 48) | def _check_branches(self, num_branches, num_blocks, in_channels,
    method _make_one_branch (line 65) | def _make_one_branch(self,
    method _make_branches (line 111) | def _make_branches(self, num_branches, block, num_blocks, num_channels):
    method _make_fuse_layers (line 120) | def _make_fuse_layers(self):
    method forward (line 182) | def forward(self, x):
  class HRNet (line 203) | class HRNet(BaseModule):
    method __init__ (line 267) | def __init__(self,
    method norm1 (line 378) | def norm1(self):
    method norm2 (line 383) | def norm2(self):
    method _make_transition_layer (line 387) | def _make_transition_layer(self, num_channels_pre_layer,
    method _make_layer (line 433) | def _make_layer(self, block, inplanes, planes, blocks, stride=1):
    method _make_stage (line 480) | def _make_stage(self, layer_config, in_channels, multiscale_output=True):
    method forward (line 520) | def forward(self, x):
    method train (line 556) | def train(self, mode=True):

FILE: mmdet/models/backbones/regnet.py
  class RegNet (line 13) | class RegNet(ResNet):
    method __init__ (line 89) | def __init__(self,
    method _make_stem_layer (line 237) | def _make_stem_layer(self, in_channels, base_channels):
    method generate_regnet (line 251) | def generate_regnet(self,
    method quantize_float (line 284) | def quantize_float(number, divisor):
    method adjust_width_group (line 296) | def adjust_width_group(self, widths, bottleneck_ratio, groups):
    method get_stages_from_blocks (line 321) | def get_stages_from_blocks(self, widths):
    method forward (line 343) | def forward(self, x):

FILE: mmdet/models/backbones/res2net.py
  class Bottle2neck (line 14) | class Bottle2neck(_Bottleneck):
    method __init__ (line 17) | def __init__(self,
    method forward (line 105) | def forward(self, x):
  class Res2Layer (line 162) | class Res2Layer(Sequential):
    method __init__ (line 181) | def __init__(self,
  class Res2Net (line 242) | class Res2Net(ResNet):
    method __init__ (line 302) | def __init__(self,
    method make_res_layer (line 321) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/resnest.py
  class RSoftmax (line 16) | class RSoftmax(nn.Module):
    method __init__ (line 24) | def __init__(self, radix, groups):
    method forward (line 29) | def forward(self, x):
  class SplitAttentionConv2d (line 40) | class SplitAttentionConv2d(BaseModule):
    method __init__ (line 63) | def __init__(self,
    method norm0 (line 115) | def norm0(self):
    method norm1 (line 120) | def norm1(self):
    method forward (line 124) | def forward(self, x):
  class Bottleneck (line 153) | class Bottleneck(_Bottleneck):
    method __init__ (line 172) | def __init__(self,
    method forward (line 233) | def forward(self, x):
  class ResNeSt (line 277) | class ResNeSt(ResNetV1d):
    method __init__ (line 298) | def __init__(self,
    method make_res_layer (line 312) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/resnet.py
  class BasicBlock (line 13) | class BasicBlock(BaseModule):
    method __init__ (line 16) | def __init__(self,
    method norm1 (line 57) | def norm1(self):
    method norm2 (line 62) | def norm2(self):
    method forward (line 66) | def forward(self, x):
  class Bottleneck (line 96) | class Bottleneck(BaseModule):
    method __init__ (line 99) | def __init__(self,
    method make_block_plugins (line 218) | def make_block_plugins(self, in_channels, plugins):
    method forward_plugin (line 241) | def forward_plugin(self, x, plugin_names):
    method norm1 (line 248) | def norm1(self):
    method norm2 (line 253) | def norm2(self):
    method norm3 (line 258) | def norm3(self):
    method forward (line 262) | def forward(self, x):
  class ResNet (line 305) | class ResNet(BaseModule):
    method __init__ (line 368) | def __init__(self,
    method make_stage_plugins (line 493) | def make_stage_plugins(self, plugins, stage_idx):
    method make_res_layer (line 555) | def make_res_layer(self, **kwargs):
    method norm1 (line 560) | def norm1(self):
    method _make_stem_layer (line 564) | def _make_stem_layer(self, in_channels, stem_channels):
    method _freeze_stages (line 612) | def _freeze_stages(self):
    method forward (line 630) | def forward(self, x):
    method train (line 650) | def train(self, mode=True):
  class ResNetV1d (line 663) | class ResNetV1d(ResNet):
    method __init__ (line 672) | def __init__(self, **kwargs):

FILE: mmdet/models/backbones/resnext.py
  class Bottleneck (line 11) | class Bottleneck(_Bottleneck):
    method __init__ (line 14) | def __init__(self,
    method _del_block_plugins (line 97) | def _del_block_plugins(self, plugin_names):
  class ResNeXt (line 109) | class ResNeXt(ResNet):
    method __init__ (line 142) | def __init__(self, groups=1, base_width=4, **kwargs):
    method make_res_layer (line 147) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/ssd_vgg.py
  class SSDVGG (line 13) | class SSDVGG(VGG, BaseModule):
    method __init__ (line 42) | def __init__(self,
    method init_weights (line 109) | def init_weights(self, pretrained=None):
    method forward (line 112) | def forward(self, x):
    method _make_extra_layers (line 129) | def _make_extra_layers(self, outplanes):
  class L2Norm (line 156) | class L2Norm(nn.Module):
    method __init__ (line 158) | def __init__(self, n_dims, scale=20., eps=1e-10):
    method forward (line 173) | def forward(self, x):

FILE: mmdet/models/backbones/swin.py
  function _ntuple (line 20) | def _ntuple(n):
  function swin_converter (line 37) | def swin_converter(ckpt):
  class WindowMSA (line 92) | class WindowMSA(BaseModule):
    method __init__ (line 111) | def __init__(self,
    method init_weights (line 148) | def init_weights(self):
    method forward (line 151) | def forward(self, x, mask=None):
    method double_step_seq (line 192) | def double_step_seq(step1, len1, step2, len2):
  class ShiftWindowMSA (line 198) | class ShiftWindowMSA(BaseModule):
    method __init__ (line 221) | def __init__(self,
    method forward (line 251) | def forward(self, query, hw_shape):
    method window_reverse (line 327) | def window_reverse(self, windows, H, W):
    method window_partition (line 343) | def window_partition(self, x):
  class SwinBlock (line 359) | class SwinBlock(BaseModule):
    method __init__ (line 384) | def __init__(self,
    method forward (line 429) | def forward(self, x, hw_shape):
  class SwinBlockSequence (line 452) | class SwinBlockSequence(BaseModule):
    method __init__ (line 481) | def __init__(self,
    method forward (line 526) | def forward(self, x, hw_shape):
  class SwinTransformer (line 538) | class SwinTransformer(BaseModule):
    method __init__ (line 595) | def __init__(self,
    method train (line 714) | def train(self, mode=True):
    method _freeze_stages (line 719) | def _freeze_stages(self):
    method init_weights (line 741) | def init_weights(self):
    method forward (line 818) | def forward(self, x):

FILE: mmdet/models/backbones/trident_resnet.py
  class TridentConv (line 13) | class TridentConv(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method extra_repr (line 60) | def extra_repr(self):
    method forward (line 72) | def forward(self, inputs):
  class TridentBottleneck (line 92) | class TridentBottleneck(Bottleneck):
    method __init__ (line 105) | def __init__(self, trident_dilations, test_branch_idx, concat_output,
    method forward (line 127) | def forward(self, x):
  function make_trident_res_layer (line 181) | def make_trident_res_layer(block,
  class TridentResNet (line 234) | class TridentResNet(ResNet):
    method __init__ (line 255) | def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,

FILE: mmdet/models/builder.py
  function build_backbone (line 17) | def build_backbone(cfg):
  function build_neck (line 22) | def build_neck(cfg):
  function build_roi_extractor (line 27) | def build_roi_extractor(cfg):
  function build_shared_head (line 32) | def build_shared_head(cfg):
  function build_head (line 37) | def build_head(cfg):
  function build_loss (line 42) | def build_loss(cfg):
  function build_detector (line 47) | def build_detector(cfg, train_cfg=None, test_cfg=None):

FILE: mmdet/models/dense_heads/anchor_free_head.py
  class AnchorFreeHead (line 16) | class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 42) | def __init__(self,
    method _init_layers (line 90) | def _init_layers(self):
    method _init_cls_convs (line 96) | def _init_cls_convs(self):
    method _init_reg_convs (line 116) | def _init_reg_convs(self):
    method _init_predictor (line 136) | def _init_predictor(self):
    method _load_from_state_dict (line 143) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method forward (line 181) | def forward(self, feats):
    method forward_single (line 199) | def forward_single(self, x):
    method loss (line 224) | def loss(self,
    method get_bboxes (line 253) | def get_bboxes(self,
    method get_targets (line 276) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
    method _get_points_single (line 290) | def _get_points_single(self,
    method get_points (line 308) | def get_points(self, featmap_sizes, dtype, device, flatten=False):
    method aug_test (line 326) | def aug_test(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/anchor_head.py
  class AnchorHead (line 14) | class AnchorHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 36) | def __init__(self,
    method _init_layers (line 99) | def _init_layers(self):
    method forward_single (line 105) | def forward_single(self, x):
    method forward (line 122) | def forward(self, feats):
    method get_anchors (line 141) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method _get_targets_single (line 171) | def _get_targets_single(self,
    method get_targets (line 269) | def get_targets(self,
    method loss_single (line 371) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
    method loss (line 422) | def loss(self,
    method get_bboxes (line 492) | def get_bboxes(self,
    method _get_bboxes (line 588) | def _get_bboxes(self,
    method aug_test (line 725) | def aug_test(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/atss_head.py
  class ATSSHead (line 14) | class ATSSHead(AnchorHead):
    method __init__ (line 24) | def __init__(self,
    method _init_layers (line 58) | def _init_layers(self):
    method forward (line 95) | def forward(self, feats):
    method forward_single (line 113) | def forward_single(self, x, scale):
    method loss_single (line 142) | def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
    method loss (line 218) | def loss(self,
    method centerness_target (line 293) | def centerness_target(self, anchors, bbox_targets):
    method get_bboxes (line 312) | def get_bboxes(self,
    method _get_bboxes (line 371) | def _get_bboxes(self,
    method get_targets (line 505) | def get_targets(self,
    method _get_target_single (line 569) | def _get_target_single(self,
    method get_num_level_anchors_inside (line 679) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):

FILE: mmdet/models/dense_heads/autoassign_head.py
  class CenterPrior (line 17) | class CenterPrior(nn.Module):
    method __init__ (line 33) | def __init__(self,
    method forward (line 45) | def forward(self, anchor_points_list, gt_bboxes, labels,
  class AutoAssignHead (line 124) | class AutoAssignHead(FCOSHead):
    method __init__ (line 143) | def __init__(self,
    method init_weights (line 161) | def init_weights(self):
    method _get_points_single (line 173) | def _get_points_single(self,
    method forward_single (line 189) | def forward_single(self, x, scale, stride):
    method get_pos_loss_single (line 214) | def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels,
    method get_neg_loss_single (line 258) | def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious,
    method loss (line 314) | def loss(self,
    method get_targets (line 444) | def get_targets(self, points, gt_bboxes_list):
    method _get_target_single (line 481) | def _get_target_single(self, gt_bboxes, points):

FILE: mmdet/models/dense_heads/base_dense_head.py
  class BaseDenseHead (line 6) | class BaseDenseHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 9) | def __init__(self, init_cfg=None):
    method loss (line 13) | def loss(self, **kwargs):
    method get_bboxes (line 18) | def get_bboxes(self, **kwargs):
    method forward_train (line 22) | def forward_train(self,

FILE: mmdet/models/dense_heads/cascade_rpn_head.py
  class AdaptiveConv (line 18) | class AdaptiveConv(BaseModule):
    method __init__ (line 40) | def __init__(self,
    method forward (line 77) | def forward(self, x, offset):
  class StageCascadeRPNHead (line 94) | class StageCascadeRPNHead(RPNHead):
    method __init__ (line 110) | def __init__(self,
    method _init_layers (line 151) | def _init_layers(self):
    method forward_single (line 162) | def forward_single(self, x, offset):
    method forward (line 172) | def forward(self, feats, offset_list=None):
    method _region_targets_single (line 178) | def _region_targets_single(self,
    method region_targets (line 234) | def region_targets(self,
    method get_targets (line 284) | def get_targets(self,
    method anchor_offset (line 326) | def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
    method loss_single (line 404) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
    method loss (line 434) | def loss(self,
    method get_bboxes (line 503) | def get_bboxes(self,
    method refine_bboxes (line 530) | def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
    method _get_bboxes_single (line 547) | def _get_bboxes_single(self,
  class CascadeRPNHead (line 675) | class CascadeRPNHead(BaseDenseHead):
    method __init__ (line 690) | def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=N...
    method loss (line 705) | def loss(self):
    method get_bboxes (line 709) | def get_bboxes(self):
    method forward_train (line 713) | def forward_train(self,
    method simple_test_rpn (line 758) | def simple_test_rpn(self, x, img_metas):
    method aug_test_rpn (line 783) | def aug_test_rpn(self, x, img_metas):

FILE: mmdet/models/dense_heads/centripetal_head.py
  class CentripetalHead (line 11) | class CentripetalHead(CornerHead):
    method __init__ (line 46) | def __init__(self,
    method _init_centripetal_layers (line 71) | def _init_centripetal_layers(self):
    method _init_layers (line 131) | def _init_layers(self):
    method init_weights (line 139) | def init_weights(self):
    method forward_single (line 155) | def forward_single(self, x, lvl_ind):
    method loss (line 205) | def loss(self,
    method loss_single (line 281) | def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
    method get_bboxes (line 363) | def get_bboxes(self,

FILE: mmdet/models/dense_heads/corner_head.py
  class BiCornerPool (line 17) | class BiCornerPool(BaseModule):
    method __init__ (line 30) | def __init__(self,
    method forward (line 60) | def forward(self, x):
  class CornerHead (line 81) | class CornerHead(BaseDenseHead):
    method __init__ (line 114) | def __init__(self,
    method _make_layers (line 153) | def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
    method _init_corner_kpt_layers (line 160) | def _init_corner_kpt_layers(self):
    method _init_corner_emb_layers (line 198) | def _init_corner_emb_layers(self):
    method _init_layers (line 216) | def _init_layers(self):
    method init_weights (line 225) | def init_weights(self):
    method forward (line 243) | def forward(self, feats):
    method forward_single (line 275) | def forward_single(self, x, lvl_ind, return_pool=False):
    method get_targets (line 320) | def get_targets(self,
    method loss (line 508) | def loss(self,
    method loss_single (line 570) | def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
    method get_bboxes (line 648) | def get_bboxes(self,
    method _get_bboxes_single (line 697) | def _get_bboxes_single(self,
    method _bboxes_nms (line 775) | def _bboxes_nms(self, bboxes, labels, cfg):
    method _gather_feat (line 797) | def _gather_feat(self, feat, ind, mask=None):
    method _local_maximum (line 817) | def _local_maximum(self, heat, kernel=3):
    method _transpose_and_gather_feat (line 833) | def _transpose_and_gather_feat(self, feat, ind):
    method _topk (line 848) | def _topk(self, scores, k=20):
    method decode_heatmap (line 874) | def decode_heatmap(self,

FILE: mmdet/models/dense_heads/deformable_detr_head.py
  class DeformableDETRHead (line 16) | class DeformableDETRHead(DETRHead):
    method __init__ (line 35) | def __init__(self,
    method _init_layers (line 49) | def _init_layers(self):
    method init_weights (line 82) | def init_weights(self):
    method forward (line 96) | def forward(self, mlvl_feats, img_metas):
    method loss (line 183) | def loss(self,
    method get_bboxes (line 268) | def get_bboxes(self,

FILE: mmdet/models/dense_heads/dense_test_mixins.py
  class BBoxTestMixin (line 8) | class BBoxTestMixin(object):
    method merge_aug_bboxes (line 11) | def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
    method aug_test_bboxes (line 38) | def aug_test_bboxes(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/detr_head.py
  class DETRHead (line 17) | class DETRHead(AnchorFreeHead):
    method __init__ (line 51) | def __init__(self,
    method _init_layers (line 151) | def _init_layers(self):
    method init_weights (line 166) | def init_weights(self):
    method _load_from_state_dict (line 171) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method forward (line 200) | def forward(self, feats, img_metas):
    method forward_single (line 224) | def forward_single(self, x, img_metas):
    method loss (line 266) | def loss(self,
    method loss_single (line 332) | def loss_single(self,
    method get_targets (line 418) | def get_targets(self,
    method _get_target_single (line 475) | def _get_target_single(self,
    method forward_train (line 546) | def forward_train(self,
    method get_bboxes (line 582) | def get_bboxes(self,
    method _get_bboxes_single (line 627) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/embedding_rpn_head.py
  class EmbeddingRPNHead (line 10) | class EmbeddingRPNHead(BaseModule):
    method __init__ (line 25) | def __init__(self,
    method _init_layers (line 37) | def _init_layers(self):
    method init_weights (line 43) | def init_weights(self):
    method _decode_init_proposals (line 53) | def _decode_init_proposals(self, imgs, img_metas):
    method forward_dummy (line 95) | def forward_dummy(self, img, img_metas):
    method forward_train (line 102) | def forward_train(self, img, img_metas):
    method simple_test_rpn (line 106) | def simple_test_rpn(self, img, img_metas):

FILE: mmdet/models/dense_heads/fcos_head.py
  class FCOSHead (line 16) | class FCOSHead(AnchorFreeHead):
    method __init__ (line 59) | def __init__(self,
    method _init_layers (line 105) | def _init_layers(self):
    method forward (line 112) | def forward(self, feats):
    method forward_single (line 133) | def forward_single(self, x, scale, stride):
    method loss (line 165) | def loss(self,
    method get_bboxes (line 265) | def get_bboxes(self,
    method _get_bboxes (line 330) | def _get_bboxes(self,
    method _get_points_single (line 474) | def _get_points_single(self,
    method get_targets (line 486) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
    method _get_target_single (line 547) | def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ran...
    method centerness_target (line 631) | def centerness_target(self, pos_bbox_targets):

FILE: mmdet/models/dense_heads/fovea_head.py
  class FeatureAlign (line 14) | class FeatureAlign(BaseModule):
    method __init__ (line 16) | def __init__(self,
    method forward (line 39) | def forward(self, x, shape):
  class FoveaHead (line 46) | class FoveaHead(AnchorFreeHead):
    method __init__ (line 51) | def __init__(self,
    method _init_layers (line 77) | def _init_layers(self):
    method forward_single (line 117) | def forward_single(self, x):
    method _get_points_single (line 130) | def _get_points_single(self, *args, **kwargs):
    method loss (line 134) | def loss(self,
    method get_targets (line 184) | def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, poin...
    method _get_target_single (line 206) | def _get_target_single(self,
    method get_bboxes (line 267) | def get_bboxes(self,
    method _get_bboxes_single (line 298) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/free_anchor_retina_head.py
  class FreeAnchorRetinaHead (line 12) | class FreeAnchorRetinaHead(RetinaHead):
    method __init__ (line 33) | def __init__(self,
    method loss (line 53) | def loss(self,
    method positive_bag_loss (line 219) | def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
    method negative_bag_loss (line 246) | def negative_bag_loss(self, cls_prob, box_prob):

FILE: mmdet/models/dense_heads/fsaf_head.py
  class FSAFHead (line 14) | class FSAFHead(RetinaHead):
    method __init__ (line 42) | def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs):
    method forward_single (line 62) | def forward_single(self, x):
    method _get_targets_single (line 79) | def _get_targets_single(self,
    method loss (line 187) | def loss(self,
    method calculate_pos_recall (line 316) | def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
    method collect_loss_level_single (line 350) | def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
    method reweight_loss_single (line 381) | def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,

FILE: mmdet/models/dense_heads/ga_retina_head.py
  class GARetinaHead (line 10) | class GARetinaHead(GuidedAnchorHead):
    method __init__ (line 13) | def __init__(self,
    method _init_layers (line 44) | def _init_layers(self):
    method forward_single (line 91) | def forward_single(self, x):

FILE: mmdet/models/dense_heads/ga_rpn_head.py
  class GARPNHead (line 16) | class GARPNHead(RPNTestMixin, GuidedAnchorHead):
    method __init__ (line 19) | def __init__(self,
    method _init_layers (line 34) | def _init_layers(self):
    method forward_single (line 40) | def forward_single(self, x):
    method loss (line 49) | def loss(self,
    method _get_bboxes_single (line 72) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/gfl_head.py
  class Integral (line 15) | class Integral(nn.Module):
    method __init__ (line 28) | def __init__(self, reg_max=16):
    method forward (line 34) | def forward(self, x):
  class GFLHead (line 52) | class GFLHead(AnchorHead):
    method __init__ (line 85) | def __init__(self,
    method _init_layers (line 120) | def _init_layers(self):
    method forward (line 153) | def forward(self, feats):
    method forward_single (line 171) | def forward_single(self, x, scale):
    method anchor_center (line 197) | def anchor_center(self, anchors):
    method loss_single (line 210) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
    method loss (line 299) | def loss(self,
    method _get_bboxes (line 372) | def _get_bboxes(self,
    method get_targets (line 473) | def get_targets(self,
    method _get_target_single (line 537) | def _get_target_single(self,
    method get_num_level_anchors_inside (line 643) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):

FILE: mmdet/models/dense_heads/guided_anchor_head.py
  class FeatureAdaption (line 14) | class FeatureAdaption(BaseModule):
    method __init__ (line 29) | def __init__(self,
    method forward (line 52) | def forward(self, x, shape):
  class GuidedAnchorHead (line 59) | class GuidedAnchorHead(AnchorHead):
    method __init__ (line 95) | def __init__(
    method _init_layers (line 208) | def _init_layers(self):
    method forward_single (line 223) | def forward_single(self, x):
    method forward (line 236) | def forward(self, feats):
    method get_sampled_approxs (line 239) | def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
    method get_anchors (line 289) | def get_anchors(self,
    method _get_guided_anchors_single (line 340) | def _get_guided_anchors_single(self,
    method ga_loc_targets (line 374) | def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
    method _ga_shape_target_single (line 476) | def _ga_shape_target_single(self,
    method ga_shape_targets (line 539) | def ga_shape_targets(self,
    method loss_shape_single (line 605) | def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
    method loss_loc_single (line 629) | def loss_loc_single(self, loc_pred, loc_target, loc_weight,
    method loss (line 640) | def loss(self,
    method get_bboxes (line 746) | def get_bboxes(self,
    method _get_bboxes_single (line 790) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/ld_head.py
  class LDHead (line 11) | class LDHead(GFLHead):
    method __init__ (line 26) | def __init__(self,
    method loss_single (line 38) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
    method forward_train (line 142) | def forward_train(self,
    method loss (line 185) | def loss(self,

FILE: mmdet/models/dense_heads/nasfcos_head.py
  class NASFCOSHead (line 11) | class NASFCOSHead(FCOSHead):
    method __init__ (line 19) | def __init__(self, *args, init_cfg=None, **kwargs):
    method _init_layers (line 38) | def _init_layers(self):

FILE: mmdet/models/dense_heads/paa_head.py
  function levels_to_images (line 17) | def levels_to_images(mlvl_tensor):
  class PAAHead (line 45) | class PAAHead(ATSSHead):
    method __init__ (line 73) | def __init__(self,
    method loss (line 86) | def loss(self,
    method get_pos_loss (line 201) | def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_wei...
    method paa_reassign (line 257) | def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
    method gmm_separation_scheme (line 366) | def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
    method get_targets (line 401) | def get_targets(
    method _get_targets_single (line 493) | def _get_targets_single(self,
    method _get_bboxes (line 519) | def _get_bboxes(self,
    method score_voting (line 606) | def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,

FILE: mmdet/models/dense_heads/pisa_retinanet_head.py
  class PISARetinaHead (line 11) | class PISARetinaHead(RetinaHead):
    method loss (line 22) | def loss(self,

FILE: mmdet/models/dense_heads/pisa_ssd_head.py
  class PISASSDHead (line 11) | class PISASSDHead(SSDHead):
    method loss (line 13) | def loss(self,

FILE: mmdet/models/dense_heads/query_generator.py
  class InitialQueryGenerator (line 10) | class InitialQueryGenerator(BaseModule):
    method __init__ (line 16) | def __init__(self,
    method _init_layers (line 28) | def _init_layers(self):
    method init_weights (line 33) | def init_weights(self):
    method _decode_init_proposals (line 38) | def _decode_init_proposals(self, imgs, img_metas):
    method forward_dummy (line 72) | def forward_dummy(self, img, img_metas):
    method forward_train (line 79) | def forward_train(self, img, img_metas):
    method simple_test_rpn (line 83) | def simple_test_rpn(self, img, img_metas):

FILE: mmdet/models/dense_heads/reppoints_head.py
  class RepPointsHead (line 14) | class RepPointsHead(AnchorFreeHead):
    method __init__ (line 33) | def __init__(self,
    method _init_layers (line 122) | def _init_layers(self):
    method points2bbox (line 166) | def points2bbox(self, pts, y_first=True):
    method gen_grid_from_reg (line 217) | def gen_grid_from_reg(self, reg, previous_boxes):
    method forward (line 251) | def forward(self, feats):
    method forward_single (line 254) | def forward_single(self, x):
    method get_points (line 296) | def get_points(self, featmap_sizes, img_metas, device):
    method centers_to_bboxes (line 336) | def centers_to_bboxes(self, point_list):
    method offset_to_pts (line 354) | def offset_to_pts(self, center_list, pred_list):
    method _point_target_single (line 375) | def _point_target_single(self,
    method get_targets (line 447) | def get_targets(self,
    method loss_single (line 531) | def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
    method loss (line 569) | def loss(self,
    method get_bboxes (line 665) | def get_bboxes(self,
    method _get_bboxes_single (line 703) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/retina_head.py
  class RetinaHead (line 9) | class RetinaHead(AnchorHead):
    method __init__ (line 28) | def __init__(self,
    method _init_layers (line 60) | def _init_layers(self):
    method forward_single (line 93) | def forward_single(self, x):

FILE: mmdet/models/dense_heads/retina_sepbn_head.py
  class RetinaSepBNHead (line 9) | class RetinaSepBNHead(AnchorHead):
    method __init__ (line 17) | def __init__(self,
    method _init_layers (line 35) | def _init_layers(self):
    method init_weights (line 77) | def init_weights(self):
    method forward (line 88) | def forward(self, feats):

FILE: mmdet/models/dense_heads/rpn_head.py
  class RPNHead (line 16) | class RPNHead(RPNTestMixin, AnchorHead):
    method __init__ (line 24) | def __init__(self,
    method _init_layers (line 31) | def _init_layers(self):
    method forward_single (line 39) | def forward_single(self, x):
    method loss (line 47) | def loss(self,
    method _get_bboxes (line 80) | def _get_bboxes(self,

FILE: mmdet/models/dense_heads/rpn_test_mixin.py
  class RPNTestMixin (line 9) | class RPNTestMixin(object):
    method async_simple_test_rpn (line 14) | async def async_simple_test_rpn(self, x, img_metas):
    method simple_test_rpn (line 24) | def simple_test_rpn(self, x, img_metas):
    method aug_test_rpn (line 39) | def aug_test_rpn(self, feats, img_metas):

FILE: mmdet/models/dense_heads/sabl_retina_head.py
  class SABLRetinaHead (line 16) | class SABLRetinaHead(BaseDenseHead):
    method __init__ (line 49) | def __init__(self,
    method _init_layers (line 151) | def _init_layers(self):
    method forward_single (line 182) | def forward_single(self, x):
    method forward (line 195) | def forward(self, feats):
    method get_anchors (line 198) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method get_target (line 219) | def get_target(self,
    method _get_target_single (line 319) | def _get_target_single(self,
    method loss_single (line 436) | def loss_single(self, cls_score, bbox_pred, labels, label_weights,
    method loss (line 469) | def loss(self,
    method get_bboxes (line 523) | def get_bboxes(self,
    method get_bboxes_single (line 557) | def get_bboxes_single(self,

FILE: mmdet/models/dense_heads/ssd_head.py
  class SSDHead (line 15) | class SSDHead(AnchorHead):
    method __init__ (line 34) | def __init__(self,
    method forward (line 98) | def forward(self, feats):
    method loss_single (line 122) | def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weig...
    method loss (line 180) | def loss(self,

FILE: mmdet/models/dense_heads/vfnet_head.py
  class VFNetHead (line 19) | class VFNetHead(ATSSHead, FCOSHead):
    method __init__ (line 65) | def __init__(self,
    method _init_layers (line 158) | def _init_layers(self):
    method forward (line 193) | def forward(self, feats):
    method forward_single (line 215) | def forward_single(self, x, scale, scale_refine, stride, reg_denom):
    method star_dcn_offset (line 273) | def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
    method loss (line 315) | def loss(self,
    method get_bboxes (line 463) | def get_bboxes(self,
    method _get_bboxes_single (line 521) | def _get_bboxes_single(self,
    method _get_points_single (line 596) | def _get_points_single(self,
    method get_targets (line 619) | def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
    method _get_target_single (line 654) | def _get_target_single(self, *args, **kwargs):
    method get_fcos_targets (line 661) | def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
    method get_atss_targets (line 687) | def get_atss_targets(self,
    method transform_bbox_targets (line 762) | def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
    method _load_from_state_dict (line 787) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...

FILE: mmdet/models/dense_heads/yolact_head.py
  class YOLACTHead (line 14) | class YOLACTHead(AnchorHead):
    method __init__ (line 42) | def __init__(self,
    method _init_layers (line 87) | def _init_layers(self):
    method forward_single (line 115) | def forward_single(self, x):
    method loss (line 138) | def loss(self,
    method loss_single_OHEM (line 259) | def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
    method get_bboxes (line 295) | def get_bboxes(self,
    method _get_bboxes_single (line 362) | def _get_bboxes_single(self,
  class YOLACTSegmHead (line 460) | class YOLACTSegmHead(BaseModule):
    method __init__ (line 475) | def __init__(self,
    method _init_layers (line 493) | def _init_layers(self):
    method forward (line 498) | def forward(self, x):
    method loss (line 512) | def loss(self, segm_pred, gt_masks, gt_labels):
    method get_targets (line 545) | def get_targets(self, segm_pred, gt_masks, gt_labels):
  class YOLACTProtonet (line 577) | class YOLACTProtonet(BaseModule):
    method __init__ (line 596) | def __init__(self,
    method _init_layers (line 622) | def _init_layers(self):
    method forward (line 659) | def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=No...
    method loss (line 729) | def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_resu...
    method get_targets (line 802) | def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
    method get_seg_masks (line 827) | def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
    method crop (line 862) | def crop(self, masks, boxes, padding=1):
    method sanitize_coordinates (line 896) | def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
  class InterpolateModule (line 928) | class InterpolateModule(BaseModule):
    method __init__ (line 934) | def __init__(self, *args, init_cfg=None, **kwargs):
    method forward (line 940) | def forward(self, x):

FILE: mmdet/models/dense_heads/yolo_head.py
  class YOLOV3Head (line 20) | class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 48) | def __init__(self,
    method num_levels (line 121) | def num_levels(self):
    method num_attrib (line 125) | def num_attrib(self):
    method _init_layers (line 131) | def _init_layers(self):
    method forward (line 149) | def forward(self, feats):
    method get_bboxes (line 172) | def get_bboxes(self,
    method _get_bboxes (line 209) | def _get_bboxes(self,
    method loss (line 396) | def loss(self,
    method loss_single (line 446) | def loss_single(self, pred_map, target_map, neg_map):
    method get_targets (line 491) | def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
    method _get_targets_single (line 527) | def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
    method aug_test (line 588) | def aug_test(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/yolof_head.py
  function levels_to_images (line 14) | def levels_to_images(mlvl_tensor):
  class YOLOFHead (line 42) | class YOLOFHead(AnchorHead):
    method __init__ (line 55) | def __init__(self,
    method _init_layers (line 67) | def _init_layers(self):
    method init_weights (line 107) | def init_weights(self):
    method forward_single (line 118) | def forward_single(self, feature):
    method loss (line 136) | def loss(self,
    method get_targets (line 223) | def get_targets(self,
    method _get_targets_single (line 310) | def _get_targets_single(self,

FILE: mmdet/models/detectors/atss.py
  class ATSS (line 6) | class ATSS(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/autoassign.py
  class AutoAssign (line 6) | class AutoAssign(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/base.py
  class BaseDetector (line 13) | class BaseDetector(BaseModule, metaclass=ABCMeta):
    method __init__ (line 16) | def __init__(self, init_cfg=None):
    method with_neck (line 21) | def with_neck(self):
    method with_shared_head (line 28) | def with_shared_head(self):
    method with_bbox (line 33) | def with_bbox(self):
    method with_mask (line 39) | def with_mask(self):
    method extract_feat (line 45) | def extract_feat(self, imgs):
    method extract_feats (line 49) | def extract_feats(self, imgs):
    method forward_train (line 62) | def forward_train(self, imgs, img_metas, **kwargs):
    method async_simple_test (line 81) | async def async_simple_test(self, img, img_metas, **kwargs):
    method simple_test (line 85) | def simple_test(self, img, img_metas, **kwargs):
    method aug_test (line 89) | def aug_test(self, imgs, img_metas, **kwargs):
    method aforward_test (line 93) | async def aforward_test(self, *, img, img_metas, **kwargs):
    method forward_test (line 111) | def forward_test(self, imgs, img_metas, **kwargs):
    method forward (line 156) | def forward(self, img, img_metas, return_loss=True, **kwargs):
    method _parse_losses (line 171) | def _parse_losses(self, losses):
    method train_step (line 206) | def train_step(self, data, optimizer):
    method val_step (line 241) | def val_step(self, data, optimizer):
    method show_result (line 256) | def show_result(self,

FILE: mmdet/models/detectors/cascade_rcnn.py
  class CascadeRCNN (line 6) | class CascadeRCNN(TwoStageDetector):
    method __init__ (line 10) | def __init__(self,
    method show_result (line 29) | def show_result(self, data, result, **kwargs):

FILE: mmdet/models/detectors/cornernet.py
  class CornerNet (line 9) | class CornerNet(SingleStageDetector):
    method __init__ (line 16) | def __init__(self,
    method merge_aug_results (line 27) | def merge_aug_results(self, aug_results, img_metas):
    method aug_test (line 61) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/deformable_detr.py
  class DeformableDETR (line 6) | class DeformableDETR(DETR):
    method __init__ (line 8) | def __init__(self, *args, **kwargs):

FILE: mmdet/models/detectors/detr.py
  class DETR (line 7) | class DETR(SingleStageDetector):
    method __init__ (line 11) | def __init__(self,
    method simple_test (line 21) | def simple_test(self, img, img_metas, rescale=False):

FILE: mmdet/models/detectors/fast_rcnn.py
  class FastRCNN (line 6) | class FastRCNN(TwoStageDetector):
    method __init__ (line 9) | def __init__(self,
    method forward_test (line 26) | def forward_test(self, imgs, img_metas, proposals, **kwargs):

FILE: mmdet/models/detectors/faster_rcnn.py
  class FasterRCNN (line 6) | class FasterRCNN(TwoStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/fcos.py
  class FCOS (line 6) | class FCOS(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/fovea.py
  class FOVEA (line 6) | class FOVEA(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/fsaf.py
  class FSAF (line 6) | class FSAF(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/gfl.py
  class GFL (line 6) | class GFL(SingleStageDetector):
    method __init__ (line 8) | def __init__(self,

FILE: mmdet/models/detectors/grid_rcnn.py
  class GridRCNN (line 6) | class GridRCNN(TwoStageDetector):
    method __init__ (line 14) | def __init__(self,

FILE: mmdet/models/detectors/htc.py
  class HybridTaskCascade (line 6) | class HybridTaskCascade(CascadeRCNN):
    method __init__ (line 9) | def __init__(self, **kwargs):
    method with_semantic (line 13) | def with_semantic(self):

FILE: mmdet/models/detectors/kd_one_stage.py
  class KnowledgeDistillationSingleStageDetector (line 11) | class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    method __init__ (line 22) | def __init__(self,
    method forward_train (line 43) | def forward_train(self,
    method cuda (line 75) | def cuda(self, device=None):
    method train (line 81) | def train(self, mode=True):
    method __setattr__ (line 89) | def __setattr__(self, name, value):

FILE: mmdet/models/detectors/mask_rcnn.py
  class MaskRCNN (line 6) | class MaskRCNN(TwoStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/mask_scoring_rcnn.py
  class MaskScoringRCNN (line 6) | class MaskScoringRCNN(TwoStageDetector):
    method __init__ (line 12) | def __init__(self,

FILE: mmdet/models/detectors/nasfcos.py
  class NASFCOS (line 6) | class NASFCOS(SingleStageDetector):
    method __init__ (line 12) | def __init__(self,

FILE: mmdet/models/detectors/paa.py
  class PAA (line 6) | class PAA(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/point_rend.py
  class PointRend (line 6) | class PointRend(TwoStageDetector):
    method __init__ (line 14) | def __init__(self,

FILE: mmdet/models/detectors/query_based.py
  class QueryBased (line 7) | class QueryBased(SparseRCNN):

FILE: mmdet/models/detectors/reppoints_detector.py
  class RepPointsDetector (line 6) | class RepPointsDetector(SingleStageDetector):
    method __init__ (line 13) | def __init__(self,

FILE: mmdet/models/detectors/retinanet.py
  class RetinaNet (line 6) | class RetinaNet(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/rpn.py
  class RPN (line 11) | class RPN(BaseDetector):
    method __init__ (line 14) | def __init__(self,
    method extract_feat (line 33) | def extract_feat(self, img):
    method forward_dummy (line 48) | def forward_dummy(self, img):
    method forward_train (line 54) | def forward_train(self,
    method simple_test (line 85) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 111) | def aug_test(self, imgs, img_metas, rescale=False):
    method show_result (line 136) | def show_result(self, data, result, top_k=20, **kwargs):

FILE: mmdet/models/detectors/scnet.py
  class SCNet (line 6) | class SCNet(CascadeRCNN):
    method __init__ (line 9) | def __init__(self, **kwargs):

FILE: mmdet/models/detectors/single_stage.py
  class SingleStageDetector (line 9) | class SingleStageDetector(BaseDetector):
    method __init__ (line 16) | def __init__(self,
    method extract_feat (line 35) | def extract_feat(self, img):
    method forward_dummy (line 42) | def forward_dummy(self, img):
    method forward_train (line 51) | def forward_train(self,
    method simple_test (line 81) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 114) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/sparse_rcnn.py
  class SparseRCNN (line 6) | class SparseRCNN(TwoStageDetector):
    method __init__ (line 10) | def __init__(self, *args, **kwargs):
    method forward_train (line 14) | def forward_train(self,
    method simple_test (line 66) | def simple_test(self, img, img_metas, rescale=False):
    method forward_dummy (line 92) | def forward_dummy(self, img):

FILE: mmdet/models/detectors/trident_faster_rcnn.py
  class TridentFasterRCNN (line 6) | class TridentFasterRCNN(FasterRCNN):
    method __init__ (line 9) | def __init__(self,
    method simple_test (line 33) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
    method aug_test (line 47) | def aug_test(self, imgs, img_metas, rescale=False):
    method forward_train (line 60) | def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):

FILE: mmdet/models/detectors/two_stage.py
  class TwoStageDetector (line 8) | class TwoStageDetector(BaseDetector):
    method __init__ (line 15) | def __init__(self,
    method with_rpn (line 50) | def with_rpn(self):
    method with_roi_head (line 55) | def with_roi_head(self):
    method extract_feat (line 59) | def extract_feat(self, img):
    method forward_dummy (line 66) | def forward_dummy(self, img):
    method forward_train (line 84) | def forward_train(self,
    method async_simple_test (line 148) | async def async_simple_test(self,
    method simple_test (line 166) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
    method aug_test (line 185) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/vfnet.py
  class VFNet (line 6) | class VFNet(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/yolact.py
  class YOLACT (line 9) | class YOLACT(SingleStageDetector):
    method __init__ (line 12) | def __init__(self,
    method forward_dummy (line 27) | def forward_dummy(self, img):
    method forward_train (line 34) | def forward_train(self,
    method simple_test (line 93) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 139) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/yolo.py
  class YOLOV3 (line 8) | class YOLOV3(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/yolof.py
  class YOLOF (line 6) | class YOLOF(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/losses/accuracy.py
  function accuracy (line 6) | def accuracy(pred, target, topk=1, thresh=None):
  class Accuracy (line 53) | class Accuracy(nn.Module):
    method __init__ (line 55) | def __init__(self, topk=(1, ), thresh=None):
    method forward (line 68) | def forward(self, pred, target):

FILE: mmdet/models/losses/ae_loss.py
  function ae_loss_per_image (line 10) | def ae_loss_per_image(tl_preds, br_preds, match):
  class AssociativeEmbeddingLoss (line 74) | class AssociativeEmbeddingLoss(nn.Module):
    method __init__ (line 87) | def __init__(self, pull_weight=0.25, push_weight=0.25):
    method forward (line 92) | def forward(self, pred, target, match):

FILE: mmdet/models/losses/balanced_l1_loss.py
  function balanced_l1_loss (line 12) | def balanced_l1_loss(pred,
  class BalancedL1Loss (line 53) | class BalancedL1Loss(nn.Module):
    method __init__ (line 70) | def __init__(self,
    method forward (line 83) | def forward(self,

FILE: mmdet/models/losses/cross_entropy_loss.py
  function cross_entropy (line 9) | def cross_entropy(pred,
  function _expand_onehot_labels (line 42) | def _expand_onehot_labels(labels, label_weights, label_channels):
  function binary_cross_entropy (line 58) | def binary_cross_entropy(pred,
  function mask_cross_entropy (line 94) | def mask_cross_entropy(pred,
  class CrossEntropyLoss (line 142) | class CrossEntropyLoss(nn.Module):
    method __init__ (line 144) | def __init__(self,
    method forward (line 178) | def forward(self,

FILE: mmdet/models/losses/focal_loss.py
  function py_sigmoid_focal_loss (line 11) | def py_sigmoid_focal_loss(pred,
  function sigmoid_focal_loss (line 59) | def sigmoid_focal_loss(pred,
  class FocalLoss (line 106) | class FocalLoss(nn.Module):
    method __init__ (line 108) | def __init__(self,
    method forward (line 136) | def forward(self,

FILE: mmdet/models/losses/gaussian_focal_loss.py
  function gaussian_focal_loss (line 10) | def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
  class GaussianFocalLoss (line 32) | class GaussianFocalLoss(nn.Module):
    method __init__ (line 49) | def __init__(self,
    method forward (line 60) | def forward(self,

FILE: mmdet/models/losses/gfocal_loss.py
  function quality_focal_loss (line 11) | def quality_focal_loss(pred, target, beta=2.0):
  function distribution_focal_loss (line 56) | def distribution_focal_loss(pred, label):
  class QualityFocalLoss (line 81) | class QualityFocalLoss(nn.Module):
    method __init__ (line 95) | def __init__(self,
    method forward (line 107) | def forward(self,
  class DistributionFocalLoss (line 146) | class DistributionFocalLoss(nn.Module):
    method __init__ (line 156) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 161) | def forward(self,

FILE: mmdet/models/losses/ghm_loss.py
  function _expand_onehot_labels (line 8) | def _expand_onehot_labels(labels, label_weights, label_channels):
  class GHMC (line 21) | class GHMC(nn.Module):
    method __init__ (line 35) | def __init__(self, bins=10, momentum=0, use_sigmoid=True, loss_weight=...
    method forward (line 50) | def forward(self, pred, target, label_weight, *args, **kwargs):
  class GHMR (line 99) | class GHMR(nn.Module):
    method __init__ (line 113) | def __init__(self, mu=0.02, bins=10, momentum=0, loss_weight=1.0):
    method forward (line 127) | def forward(self, pred, target, label_weight, avg_factor=None):

FILE: mmdet/models/losses/iou_loss.py
  function iou_loss (line 14) | def iou_loss(pred, target, linear=False, eps=1e-6):
  function bounded_iou_loss (line 41) | def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
  function giou_loss (line 87) | def giou_loss(pred, target, eps=1e-7):
  function diou_loss (line 107) | def diou_loss(pred, target, eps=1e-7):
  function ciou_loss (line 162) | def ciou_loss(pred, target, eps=1e-7):
  class IoULoss (line 223) | class IoULoss(nn.Module):
    method __init__ (line 236) | def __init__(self,
    method forward (line 247) | def forward(self,
  class BoundedIoULoss (line 294) | class BoundedIoULoss(nn.Module):
    method __init__ (line 296) | def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1...
    method forward (line 303) | def forward(self,
  class GIoULoss (line 330) | class GIoULoss(nn.Module):
    method __init__ (line 332) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 338) | def forward(self,
  class DIoULoss (line 370) | class DIoULoss(nn.Module):
    method __init__ (line 372) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 378) | def forward(self,
  class CIoULoss (line 410) | class CIoULoss(nn.Module):
    method __init__ (line 412) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 418) | def forward(self,

FILE: mmdet/models/losses/kd_loss.py
  function knowledge_distillation_kl_div_loss (line 11) | def knowledge_distillation_kl_div_loss(pred,
  class KnowledgeDistillationKLDivLoss (line 39) | class KnowledgeDistillationKLDivLoss(nn.Module):
    method __init__ (line 48) | def __init__(self, reduction='mean', loss_weight=1.0, T=10):
    method forward (line 55) | def forward(self,

FILE: mmdet/models/losses/mse_loss.py
  function mse_loss (line 9) | def mse_loss(pred, target):
  class MSELoss (line 15) | class MSELoss(nn.Module):
    method __init__ (line 24) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 29) | def forward(self, pred, target, weight=None, avg_factor=None):

FILE: mmdet/models/losses/pisa_loss.py
  function isr_p (line 8) | def isr_p(cls_score,
  function carl_loss (line 122) | def carl_loss(cls_score,

FILE: mmdet/models/losses/smooth_l1_loss.py
  function smooth_l1_loss (line 11) | def smooth_l1_loss(pred, target, beta=1.0):
  function l1_loss (line 33) | def l1_loss(pred, target):
  class SmoothL1Loss (line 49) | class SmoothL1Loss(nn.Module):
    method __init__ (line 60) | def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
    method forward (line 66) | def forward(self,
  class L1Loss (line 101) | class L1Loss(nn.Module):
    method __init__ (line 110) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 115) | def forward(self,

FILE: mmdet/models/losses/utils.py
  function reduce_loss (line 7) | def reduce_loss(loss, reduction):
  function weight_reduce_loss (line 28) | def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=N...
  function weighted_loss (line 57) | def weighted_loss(loss_func):

FILE: mmdet/models/losses/varifocal_loss.py
  function varifocal_loss (line 10) | def varifocal_loss(pred,
  class VarifocalLoss (line 59) | class VarifocalLoss(nn.Module):
    method __init__ (line 61) | def __init__(self,
    method forward (line 96) | def forward(self,

FILE: mmdet/models/necks/bfp.py
  class BFP (line 10) | class BFP(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method forward (line 69) | def forward(self, inputs):

FILE: mmdet/models/necks/channel_mapper.py
  class ChannelMapper (line 9) | class ChannelMapper(BaseModule):
    method __init__ (line 45) | def __init__(self,
    method forward (line 89) | def forward(self, inputs):

FILE: mmdet/models/necks/dilated_encoder.py
  class Bottleneck (line 9) | class Bottleneck(nn.Module):
    method __init__ (line 23) | def __init__(self,
    method forward (line 41) | def forward(self, x):
  class DilatedEncoder (line 51) | class DilatedEncoder(nn.Module):
    method __init__ (line 66) | def __init__(self, in_channels, out_channels, block_mid_channels,
    method _init_layers (line 76) | def _init_layers(self):
    method init_weights (line 93) | def init_weights(self):
    method forward (line 104) | def forward(self, feature):

FILE: mmdet/models/necks/fpg.py
  class Transition (line 9) | class Transition(BaseModule):
    method __init__ (line 17) | def __init__(self, in_channels, out_channels, init_cfg=None):
    method forward (line 22) | def forward(x):
  class UpInterpolationConv (line 26) | class UpInterpolationConv(Transition):
    method __init__ (line 42) | def __init__(self,
    method forward (line 62) | def forward(self, x):
  class LastConv (line 72) | class LastConv(Transition):
    method __init__ (line 82) | def __init__(self,
    method forward (line 98) | def forward(self, inputs):
  class FPG (line 104) | class FPG(BaseModule):
    method __init__ (line 149) | def __init__(self,
    method build_trans (line 316) | def build_trans(self, cfg, in_channels, out_channels, **extra_args):
    method fuse (line 322) | def fuse(self, fuse_dict):
    method forward (line 332) | def forward(self, inputs):

FILE: mmdet/models/necks/fpn.py
  class FPN (line 12) | class FPN(BaseModule):
    method __init__ (line 67) | def __init__(self,
    method forward (line 166) | def forward(self, inputs):

FILE: mmdet/models/necks/fpn_carafe.py
  class FPN_CARAFE (line 10) | class FPN_CARAFE(BaseModule):
    method __init__ (line 36) | def __init__(self,
    method init_weights (line 208) | def init_weights(self):
    method slice_as (line 218) | def slice_as(self, src, dst):
    method tensor_add (line 238) | def tensor_add(self, a, b):
    method forward (line 246) | def forward(self, inputs):

FILE: mmdet/models/necks/hrfpn.py
  class HRFPN (line 12) | class HRFPN(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method forward (line 76) | def forward(self, inputs):

FILE: mmdet/models/necks/identity_fpn.py
  class ChannelMapping (line 13) | class ChannelMapping(BaseModule):
    method __init__ (line 18) | def __init__(self,
    method forward (line 101) | def forward(self, inputs):

FILE: mmdet/models/necks/nas_fpn.py
  class NASFPN (line 10) | class NASFPN(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method forward (line 126) | def forward(self, inputs):

FILE: mmdet/models/necks/nasfcos_fpn.py
  class NASFCOS_FPN (line 11) | class NASFCOS_FPN(BaseModule):
    method __init__ (line 34) | def __init__(self,
    method forward (line 121) | def forward(self, inputs):
    method init_weights (line 155) | def init_weights(self):

FILE: mmdet/models/necks/pafpn.py
  class PAFPN (line 11) | class PAFPN(FPN):
    method __init__ (line 40) | def __init__(self,
    method forward (line 96) | def forward(self, inputs):

FILE: mmdet/models/necks/rfp.py
  class ASPP (line 11) | class ASPP(BaseModule):
    method __init__ (line 25) | def __init__(self,
    method forward (line 47) | def forward(self, x):
  class RFP (line 59) | class RFP(FPN):
    method __init__ (line 77) | def __init__(self,
    method init_weights (line 104) | def init_weights(self):
    method forward (line 116) | def forward(self, inputs):

FILE: mmdet/models/necks/yolo_neck.py
  class DetectionBlock (line 11) | class DetectionBlock(BaseModule):
    method __init__ (line 34) | def __init__(self,
    method forward (line 54) | def forward(self, x):
  class YOLOV3Neck (line 64) | class YOLOV3Neck(BaseModule):
    method __init__ (line 90) | def __init__(self,
    method forward (line 117) | def forward(self, feats):

FILE: mmdet/models/roi_heads/adamixer_decoder.py
  class AdaMixerDecoder (line 13) | class AdaMixerDecoder(CascadeRoIHead):
    method __init__ (line 16) | def __init__(self,
    method _bbox_forward (line 54) | def _bbox_forward(self, stage, img_feat, query_xyzr, query_content, im...
    method forward_train (line 86) | def forward_train(self,
    method simple_test (line 147) | def simple_test(self,
    method aug_test (line 199) | def aug_test(self, x, bboxes_list, img_metas, rescale=False):
    method forward_dummy (line 202) | def forward_dummy(self, x,

FILE: mmdet/models/roi_heads/base_roi_head.py
  class BaseRoIHead (line 8) | class BaseRoIHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 11) | def __init__(self,
    method with_bbox (line 37) | def with_bbox(self):
    method with_mask (line 42) | def with_mask(self):
    method with_shared_head (line 47) | def with_shared_head(self):
    method init_bbox_head (line 52) | def init_bbox_head(self):
    method init_mask_head (line 57) | def init_mask_head(self):
    method init_assigner_sampler (line 62) | def init_assigner_sampler(self):
    method forward_train (line 67) | def forward_train(self,
    method async_simple_test (line 78) | async def async_simple_test(self,
    method simple_test (line 88) | def simple_test(self,
    method aug_test (line 97) | def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):

FILE: mmdet/models/roi_heads/bbox_heads/adamixer_decoder_stage.py
  function dprint (line 26) | def dprint(*args, **kwargs):
  function decode_box (line 32) | def decode_box(xyzr):
  function make_sample_points (line 42) | def make_sample_points(offset, num_group, xyzr):
  class AdaptiveSamplingMixing (line 69) | class AdaptiveSamplingMixing(nn.Module):
    method __init__ (line 72) | def __init__(self,
    method init_weights (line 103) | def init_weights(self):
    method forward (line 131) | def forward(self, x, query_feat, query_xyzr, featmap_strides):
  function position_embedding (line 159) | def position_embedding(token_xyzr, num_feats, temperature=10000):
  class AdaMixerDecoderStage (line 174) | class AdaMixerDecoderStage(BBoxHead):
    method __init__ (line 177) | def __init__(self,
    method init_weights (line 257) | def init_weights(self):
    method forward (line 276) | def forward(self,
    method refine_xyzr (line 327) | def refine_xyzr(self, xyzr, xyzr_delta, return_bbox=True):
    method loss (line 338) | def loss(self,
    method _get_target_single (line 387) | def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
    method get_targets (line 418) | def get_targets(self,

FILE: mmdet/models/roi_heads/bbox_heads/adaptive_mixing_operator.py
  function dprint (line 6) | def dprint(*args, **kwargs):
  class SRShadowForFlops (line 17) | class SRShadowForFlops(nn.Module):
    method __init__ (line 18) | def __init__(self, in_dim, in_points, n_groups, query_dim=None,
    method forward (line 32) | def forward(self, x, query):
    method __user_flops_handle__ (line 36) | def __user_flops_handle__(module, input, output):
  class AdaptiveMixing (line 50) | class AdaptiveMixing(nn.Module):
    method __init__ (line 51) | def __init__(self, in_dim, in_points, n_groups, query_dim=None,
    method init_weights (line 92) | def init_weights(self):
    method forward (line 95) | def forward(self, x, query):

FILE: mmdet/models/roi_heads/bbox_heads/bbox_head.py
  class BBoxHead (line 13) | class BBoxHead(BaseModule):
    method __init__ (line 17) | def __init__(self,
    method forward (line 81) | def forward(self, x):
    method _get_target_single (line 89) | def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
    method get_targets (line 155) | def get_targets(self,
    method loss (line 223) | def loss(self,
    method get_bboxes (line 274) | def get_bboxes(self,
    method refine_bboxes (line 420) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
    method regress_by_class (line 499) | def regress_by_class(self, rois, label, bbox_pred, img_meta):

FILE: mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
  class ConvFCBBoxHead (line 9) | class ConvFCBBoxHead(BBoxHead):
    method __init__ (line 20) | def __init__(self,
    method _add_conv_fc_branch (line 99) | def _add_conv_fc_branch(self,
    method forward (line 140) | def forward(self, x):
  class Shared2FCBBoxHead (line 182) | class Shared2FCBBoxHead(ConvFCBBoxHead):
    method __init__ (line 184) | def __init__(self, fc_out_channels=1024, *args, **kwargs):
  class Shared4Conv1FCBBoxHead (line 198) | class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    method __init__ (line 200) | def __init__(self, fc_out_channels=1024, *args, **kwargs):

FILE: mmdet/models/roi_heads/bbox_heads/dii_head.py
  class DIIHead (line 17) | class DIIHead(BBoxHead):
    method __init__ (line 45) | def __init__(self,
    method init_weights (line 124) | def init_weights(self):
    method forward (line 140) | def forward(self, roi_feat, proposal_feat):
    method loss (line 197) | def loss(self,
    method _get_target_single (line 281) | def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
    method get_targets (line 354) | def get_targets(self,

FILE: mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
  class BasicResBlock (line 10) | class BasicResBlock(BaseModule):
    method __init__ (line 25) | def __init__(self,
    method forward (line 62) | def forward(self, x):
  class DoubleConvFCBBoxHead (line 76) | class DoubleConvFCBBoxHead(BBoxHead):
    method __init__ (line 90) | def __init__(self,
    method _add_conv_branch (line 135) | def _add_conv_branch(self):
    method _add_fc_branch (line 147) | def _add_fc_branch(self):
    method forward (line 157) | def forward(self, x_cls, x_reg):

FILE: mmdet/models/roi_heads/bbox_heads/sabl_head.py
  class SABLHead (line 14) | class SABLHead(BaseModule):
    method __init__ (line 57) | def __init__(self,
    method _add_fc_branch (line 208) | def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
    method cls_forward (line 217) | def cls_forward(self, cls_x):
    method attention_pool (line 224) | def attention_pool(self, reg_x):
    method side_aware_feature_extractor (line 237) | def side_aware_feature_extractor(self, reg_x):
    method reg_pred (line 258) | def reg_pred(self, x, offset_fcs, cls_fcs):
    method side_aware_split (line 276) | def side_aware_split(self, feat):
    method bbox_pred_split (line 288) | def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
    method reg_forward (line 297) | def reg_forward(self, reg_x):
    method forward (line 316) | def forward(self, x):
    method get_targets (line 323) | def get_targets(self, sampling_results, gt_bboxes, gt_labels,
    method bucket_target (line 338) | def bucket_target(self,
    method _bucket_target_single (line 364) | def _bucket_target_single(self, pos_proposals, neg_proposals,
    method loss (line 427) | def loss(self,
    method get_bboxes (line 472) | def get_bboxes(self,
    method refine_bboxes (line 514) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
    method regress_by_class (line 560) | def regress_by_class(self, rois, label, bbox_pred, img_meta):

FILE: mmdet/models/roi_heads/bbox_heads/sampling_3d_operator.py
  function sampling_each_level (line 7) | def sampling_each_level(sample_points: torch.Tensor,
  function translate_to_linear_weight (line 48) | def translate_to_linear_weight(ref: torch.Tensor, num_total,
  function sampling_3d (line 65) | def sampling_3d(

FILE: mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
  class SCNetBBoxHead (line 6) | class SCNetBBoxHead(ConvFCBBoxHead):
    method _forward_shared (line 13) | def _forward_shared(self, x):
    method _forward_cls_reg (line 30) | def _forward_cls_reg(self, x):
    method forward (line 58) | def forward(self, x, return_shared_feat=False):

FILE: mmdet/models/roi_heads/cascade_roi_head.py
  class CascadeRoIHead (line 14) | class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
    method __init__ (line 20) | def __init__(self,
    method init_bbox_head (line 50) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
    method init_mask_head (line 70) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method init_assigner_sampler (line 98) | def init_assigner_sampler(self):
    method forward_dummy (line 110) | def forward_dummy(self, x, proposals):
    method _bbox_forward (line 128) | def _bbox_forward(self, stage, x, rois):
    method _bbox_forward_train (line 141) | def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
    method _mask_forward (line 156) | def _mask_forward(self, stage, x, rois):
    method _mask_forward_train (line 168) | def _mask_forward_train(self,
    method forward_train (line 189) | def forward_train(self,
    method simple_test (line 278) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 404) | def aug_test(self, features, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/double_roi_head.py
  class DoubleHeadRoIHead (line 6) | class DoubleHeadRoIHead(StandardRoIHead):
    method __init__ (line 12) | def __init__(self, reg_roi_scale_factor, **kwargs):
    method _bbox_forward (line 16) | def _bbox_forward(self, x, rois):

FILE: mmdet/models/roi_heads/dynamic_roi_head.py
  class DynamicRoIHead (line 13) | class DynamicRoIHead(StandardRoIHead):
    method __init__ (line 16) | def __init__(self, **kwargs):
    method forward_train (line 24) | def forward_train(self,
    method _bbox_forward_train (line 108) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method update_hyperparameters (line 133) | def update_hyperparameters(self):

FILE: mmdet/models/roi_heads/grid_roi_head.py
  class GridRoIHead (line 9) | class GridRoIHead(StandardRoIHead):
    method __init__ (line 15) | def __init__(self, grid_roi_extractor, grid_head, **kwargs):
    method _random_jitter (line 26) | def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
    method forward_dummy (line 51) | def forward_dummy(self, x, proposals):
    method _bbox_forward_train (line 77) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method simple_test (line 115) | def simple_test(self,

FILE: mmdet/models/roi_heads/htc_roi_head.py
  class HybridTaskCascadeRoIHead (line 11) | class HybridTaskCascadeRoIHead(CascadeRoIHead):
    method __init__ (line 17) | def __init__(self,
    method with_semantic (line 41) | def with_semantic(self):
    method forward_dummy (line 48) | def forward_dummy(self, x, proposals):
    method _bbox_forward_train (line 83) | def _bbox_forward_train(self,
    method _mask_forward_train (line 110) | def _mask_forward_train(self,
    method _bbox_forward (line 155) | def _bbox_forward(self, stage, x, rois, semantic_feat=None):
    method _mask_forward_test (line 173) | def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
    method forward_train (line 202) | def forward_train(self,
    method simple_test (line 327) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 461) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
  class CoarseMaskHead (line 9) | class CoarseMaskHead(FCNMaskHead):
    method __init__ (line 25) | def __init__(self,
    method init_weights (line 83) | def init_weights(self):
    method forward (line 87) | def forward(self, x):

FILE: mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
  class FCNMaskHead (line 22) | class FCNMaskHead(BaseModule):
    method __init__ (line 24) | def __init__(self,
    method init_weights (line 111) | def init_weights(self):
    method forward (line 124) | def forward(self, x):
    method get_targets (line 134) | def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
    method loss (line 144) | def loss(self, mask_pred, mask_targets, labels):
    method get_seg_masks (line 175) | def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
  function _do_paste_mask (line 325) | def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):

FILE: mmdet/models/roi_heads/mask_heads/feature_relay_head.py
  class FeatureRelayHead (line 8) | class FeatureRelayHead(BaseModule):
    method __init__ (line 21) | def __init__(self,
    method forward (line 42) | def forward(self, x):

FILE: mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
  class FusedSemanticHead (line 10) | class FusedSemanticHead(BaseModule):
    method __init__ (line 26) | def __init__(self,
    method forward (line 85) | def forward(self, feats):
    method loss (line 102) | def loss(self, mask_pred, labels):

FILE: mmdet/models/roi_heads/mask_heads/global_context_head.py
  class GlobalContextHead (line 10) | class GlobalContextHead(BaseModule):
    method __init__ (line 28) | def __init__(self,
    method forward (line 79) | def forward(self, feats):
    method loss (line 93) | def loss(self, pred, labels):

FILE: mmdet/models/roi_heads/mask_heads/grid_head.py
  class GridHead (line 12) | class GridHead(BaseModule):
    method __init__ (line 14) | def __init__(self,
    method forward (line 154) | def forward(self, x):
    method calc_sub_regions (line 192) | def calc_sub_regions(self):
    method get_targets (line 223) | def get_targets(self, sampling_results, rcnn_train_cfg):
    method loss (line 291) | def loss(self, grid_pred, grid_targets):
    method get_bboxes (line 297) | def get_bboxes(self, det_bboxes, grid_pred, img_metas):

FILE: mmdet/models/roi_heads/mask_heads/htc_mask_head.py
  class HTCMaskHead (line 8) | class HTCMaskHead(FCNMaskHead):
    method __init__ (line 10) | def __init__(self, with_conv_res=True, *args, **kwargs):
    method forward (line 21) | def forward(self, x, res_feat=None, return_logits=True, return_feat=Tr...

FILE: mmdet/models/roi_heads/mask_heads/mask_point_head.py
  class MaskPointHead (line 13) | class MaskPointHead(BaseModule):
    method __init__ (line 39) | def __init__(self,
    method forward (line 85) | def forward(self, fine_grained_feats, coarse_feats):
    method get_targets (line 106) | def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
    method _get_target_single (line 146) | def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
    method loss (line 164) | def loss(self, point_pred, point_targets, labels):
    method _get_uncertainty (line 187) | def _get_uncertainty(self, mask_pred, labels):
    method get_roi_rel_points_train (line 212) | def get_roi_rel_points_train(self, mask_pred, labels, cfg):
    method get_roi_rel_points_test (line 267) | def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):

FILE: mmdet/models/roi_heads/mask_heads/maskiou_head.py
  class MaskIoUHead (line 12) | class MaskIoUHead(BaseModule):
    method __init__ (line 18) | def __init__(self,
    method forward (line 72) | def forward(self, mask_feat, mask_pred):
    method loss (line 87) | def loss(self, mask_iou_pred, mask_iou_targets):
    method get_targets (line 97) | def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targ...
    method _get_area_ratio (line 146) | def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    method get_mask_scores (line 173) | def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):

FILE: mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
  class SCNetMaskHead (line 7) | class SCNetMaskHead(FCNMaskHead):
    method __init__ (line 15) | def __init__(self, conv_to_res=True, **kwargs):

FILE: mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
  class SCNetSemanticHead (line 7) | class SCNetSemanticHead(FusedSemanticHead):
    method __init__ (line 15) | def __init__(self, conv_to_res=True, **kwargs):

FILE: mmdet/models/roi_heads/mask_scoring_roi_head.py
  class MaskScoringRoIHead (line 9) | class MaskScoringRoIHead(StandardRoIHead):
    method __init__ (line 15) | def __init__(self, mask_iou_head, **kwargs):
    method _mask_forward_train (line 20) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method simple_test_mask (line 48) | def simple_test_mask(self,

FILE: mmdet/models/roi_heads/pisa_roi_head.py
  class PISARoIHead (line 8) | class PISARoIHead(StandardRoIHead):
    method forward_train (line 12) | def forward_train(self,
    method _bbox_forward (line 86) | def _bbox_forward(self, x, rois):
    method _bbox_forward_train (line 99) | def _bbox_forward_train(self,

FILE: mmdet/models/roi_heads/point_rend_roi_head.py
  class PointRendRoIHead (line 14) | class PointRendRoIHead(StandardRoIHead):
    method __init__ (line 17) | def __init__(self, point_head, *args, **kwargs):
    method init_point_head (line 22) | def init_point_head(self, point_head):
    method _mask_forward_train (line 26) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method _mask_point_forward_train (line 41) | def _mask_point_forward_train(self, x, sampling_results, mask_pred,
    method _get_fine_grained_point_feats (line 62) | def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
    method _mask_point_forward_test (line 87) | def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
    method simple_test_mask (line 125) | def simple_test_mask(self,
    method aug_test_mask (line 178) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):

FILE: mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
  class BaseRoIExtractor (line 9) | class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
    method __init__ (line 20) | def __init__(self,
    method num_inputs (line 32) | def num_inputs(self):
    method build_roi_layers (line 36) | def build_roi_layers(self, layer_cfg, featmap_strides):
    method roi_rescale (line 61) | def roi_rescale(self, rois, scale_factor):
    method forward (line 86) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
  class GenericRoIExtractor (line 9) | class GenericRoIExtractor(BaseRoIExtractor):
    method __init__ (line 24) | def __init__(self,
    method forward (line 43) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
  class SingleRoIExtractor (line 9) | class SingleRoIExtractor(BaseRoIExtractor):
    method __init__ (line 25) | def __init__(self,
    method map_roi_levels (line 35) | def map_roi_levels(self, rois, num_levels):
    method forward (line 57) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/scnet_roi_head.py
  class SCNetRoIHead (line 11) | class SCNetRoIHead(CascadeRoIHead):
    method __init__ (line 23) | def __init__(self,
    method init_mask_head (line 47) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method with_semantic (line 54) | def with_semantic(self):
    method with_feat_relay (line 60) | def with_feat_relay(self):
    method with_glbctx (line 66) | def with_glbctx(self):
    method _fuse_glbctx (line 70) | def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
    method _slice_pos_feats (line 80) | def _slice_pos_feats(self, feats, sampling_results):
    method _bbox_forward (line 93) | def _bbox_forward(self,
    method _mask_forward (line 122) | def _mask_forward(self,
    method _bbox_forward_train (line 147) | def _bbox_forward_train(self,
    method _mask_forward_train (line 176) | def _mask_forward_train(self,
    method forward_train (line 203) | def forward_train(self,
    method simple_test (line 318) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 450) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/shared_heads/res_layer.py
  class ResLayer (line 12) | class ResLayer(BaseModule):
    method __init__ (line 14) | def __init__(self,
    method forward (line 69) | def forward(self, x):
    method train (line 74) | def train(self, mode=True):

FILE: mmdet/models/roi_heads/sparse_roi_head.py
  class SparseRoIHead (line 10) | class SparseRoIHead(CascadeRoIHead):
    method __init__ (line 32) | def __init__(self,
    method _bbox_forward (line 79) | def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
    method forward_train (line 141) | def forward_train(self,
    method simple_test (line 226) | def simple_test(self,
    method aug_test (line 301) | def aug_test(self, features, proposal_list, img_metas, rescale=False):
    method forward_dummy (line 304) | def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):

FILE: mmdet/models/roi_heads/standard_roi_head.py
  class StandardRoIHead (line 10) | class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
    method init_assigner_sampler (line 13) | def init_assigner_sampler(self):
    method init_bbox_head (line 22) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
    method init_mask_head (line 27) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method forward_dummy (line 37) | def forward_dummy(self, x, proposals):
    method forward_train (line 53) | def forward_train(self,
    method _bbox_forward (line 116) | def _bbox_forward(self, x, rois):
    method _bbox_forward_train (line 129) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method _mask_forward_train (line 144) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method _mask_forward (line 179) | def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
    method async_simple_test (line 196) | async def async_simple_test(self,
    method simple_test (line 221) | def simple_test(self,
    method aug_test (line 252) | def aug_test(self, x, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/test_mixins.py
  class BBoxTestMixin (line 15) | class BBoxTestMixin(object):
    method async_test_bboxes (line 19) | async def async_test_bboxes(self,
    method simple_test_bboxes (line 52) | def simple_test_bboxes(self,
    method aug_test_bboxes (line 168) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...
  class MaskTestMixin (line 203) | class MaskTestMixin(object):
    method async_test_mask (line 207) | async def async_test_mask(self,
    method simple_test_mask (line 248) | def simple_test_mask(self,
    method aug_test_mask (line 341) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):

FILE: mmdet/models/roi_heads/trident_roi_head.py
  class TridentRoIHead (line 11) | class TridentRoIHead(StandardRoIHead):
    method __init__ (line 21) | def __init__(self, num_branch, test_branch_idx, **kwargs):
    method merge_trident_bboxes (line 26) | def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
    method simple_test (line 45) | def simple_test(self,
    method aug_test_bboxes (line 82) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...

FILE: mmdet/models/utils/builder.py
  function build_transformer (line 6) | def build_transformer(cfg, default_args=None):

FILE: mmdet/models/utils/gaussian_target.py
  function gaussian2D (line 6) | def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
  function gen_gaussian_target (line 30) | def gen_gaussian_target(heatmap, center, radius, k=1):
  function gaussian_radius (line 66) | def gaussian_radius(det_size, min_overlap):

FILE: mmdet/models/utils/positional_encoding.py
  class SinePositionalEncoding (line 10) | class SinePositionalEncoding(BaseModule):
    method __init__ (line 35) | def __init__(self,
    method forward (line 55) | def forward(self, mask):
    method __repr__ (line 89) | def __repr__(self):
  class LearnedPositionalEncoding (line 101) | class LearnedPositionalEncoding(BaseModule):
    method __init__ (line 115) | def __init__(self,
    method forward (line 127) | def forward(self, mask):
    method __repr__ (line 151) | def __repr__(self):

FILE: mmdet/models/utils/res_layer.py
  class ResLayer (line 6) | class ResLayer(Sequential):
    method __init__ (line 25) | def __init__(self,
  class SimplifiedBasicBlock (line 106) | class SimplifiedBasicBlock(BaseModule):
    method __init__ (line 115) | def __init__(self,
    method norm1 (line 161) | def norm1(self):
    method norm2 (line 166) | def norm2(self):
    method forward (line 170) | def forward(self, x):

FILE: mmdet/models/utils/transformer.py
  function _ntuple (line 23) | def _ntuple(n):
  function nlc_to_nchw (line 40) | def nlc_to_nchw(x, hw_shape):
  function nchw_to_nlc (line 55) | def nchw_to_nlc(x):
  class AdaptivePadding (line 66) | class AdaptivePadding(nn.Module):
    method __init__ (line 96) | def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corne...
    method get_pad_shape (line 112) | def get_pad_shape(self, input_shape):
    method forward (line 124) | def forward(self, x):
  class PatchEmbed (line 137) | class PatchEmbed(BaseModule):
    method __init__ (line 163) | def __init__(
    method forward (line 237) | def forward(self, x):
  class PatchMerging (line 259) | class PatchMerging(BaseModule):
    method __init__ (line 288) | def __init__(self,
    method forward (line 337) | def forward(self, x, input_size):
  function inverse_sigmoid (line 383) | def inverse_sigmoid(x, eps=1e-5):
  class DetrTransformerDecoderLayer (line 403) | class DetrTransformerDecoderLayer(BaseTransformerLayer):
    method __init__ (line 425) | def __init__(self,
  class DetrTransformerEncoder (line 449) | class DetrTransformerEncoder(TransformerLayerSequence):
    method __init__ (line 457) | def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs):
    method forward (line 468) | def forward(self, *args, **kwargs):
  class DetrTransformerDecoder (line 481) | class DetrTransformerDecoder(TransformerLayerSequence):
    method __init__ (line 490) | def __init__(self,
    method forward (line 504) | def forward(self, query, *args, **kwargs):
  class Transformer (line 534) | class Transformer(BaseModule):
    method __init__ (line 556) | def __init__(self, encoder=None, decoder=None, init_cfg=None):
    method init_weights (line 562) | def init_weights(self):
    method forward (line 569) | def forward(self, x, mask, query_embed, pos_embed):
  class DeformableDetrTransformerDecoder (line 619) | class DeformableDetrTransformerDecoder(TransformerLayerSequence):
    method __init__ (line 628) | def __init__(self, *args, return_intermediate=False, **kwargs):
    method forward (line 633) | def forward(self,
  class DeformableDetrTransformer (line 707) | class DeformableDetrTransformer(Transformer):
    method __init__ (line 719) | def __init__(self,
    method init_layers (line 731) | def init_layers(self):
    method init_weights (line 745) | def init_weights(self):
    method gen_encoder_output_proposals (line 757) | def gen_encoder_output_proposals(self, memory, memory_padding_mask,
    method get_reference_points (line 825) | def get_reference_points(spatial_shapes, valid_ratios, device):
    method get_valid_ratio (line 859) | def get_valid_ratio(self, mask):
    method get_proposal_pos_embed (line 869) | def get_proposal_pos_embed(self,
    method forward (line 887) | def forward(self,
  class DynamicConv (line 1049) | class DynamicConv(BaseModule):
    method __init__ (line 1074) | def __init__(self,
    method forward (line 1105) | def forward(self, param_feature, input_feature):
  class DynamicConvV2 (line 1153) | class DynamicConvV2(BaseModule):
    method __init__ (line 1178) | def __init__(self,
    method forward (line 1209) | def forward(self, param_feature, input_feature):

FILE: mmdet/utils/collect_env.py
  function collect_env (line 7) | def collect_env():

FILE: mmdet/utils/contextmanagers.py
  function completed (line 16) | async def completed(trace_name='',
  function concurrent (line 91) | async def concurrent(streamqueue: asyncio.Queue,

FILE: mmdet/utils/logger.py
  function get_root_logger (line 6) | def get_root_logger(log_file=None, log_level=logging.INFO):

FILE: mmdet/utils/profiling.py
  function profile_time (line 10) | def profile_time(trace_name,

FILE: mmdet/utils/util_mixins.py
  class NiceRepr (line 41) | class NiceRepr(object):
    method __nice__ (line 75) | def __nice__(self):
    method __repr__ (line 86) | def __repr__(self):
    method __str__ (line 96) | def __str__(self):

FILE: mmdet/utils/util_random.py
  function ensure_rng (line 5) | def ensure_rng(rng=None):

FILE: mmdet/version.py
  function parse_version_info (line 7) | def parse_version_info(version_str):

FILE: setup.py
  function readme (line 10) | def readme():
  function get_version (line 19) | def get_version():
  function make_cuda_ext (line 25) | def make_cuda_ext(name, module, sources, sources_cuda=[]):
  function parse_requirements (line 50) | def parse_requirements(fname='requirements.txt', with_version=True):

FILE: tests/test_data/test_datasets/test_coco_dataset.py
  function _create_ids_error_coco_json (line 10) | def _create_ids_error_coco_json(json_name):
  function test_coco_annotation_ids_unique (line 50) | def test_coco_annotation_ids_unique():

FILE: tests/test_data/test_datasets/test_common.py
  function _create_dummy_coco_json (line 20) | def _create_dummy_coco_json(json_name):
  function _create_dummy_custom_pkl (line 80) | def _create_dummy_custom_pkl(pkl_name):
  function _create_dummy_results (line 96) | def _create_dummy_results():
  function test_dataset_init (line 106) | def test_dataset_init(config_path):
  function test_dataset_evaluation (line 120) | def test_dataset_evaluation():
  function test_evaluation_hook (line 223) | def test_evaluation_hook(EvalHookParam):
  function _build_demo_runner (line 306) | def _build_demo_runner():
  function test_allow_empty_images (line 334) | def test_allow_empty_images(classes, expected_length):

FILE: tests/test_data/test_datasets/test_custom_dataset.py
  function test_custom_classes_override_default (line 18) | def test_custom_classes_override_default(dataset):

FILE: tests/test_data/test_datasets/test_dataset_wrapper.py
  function test_dataset_wrapper (line 12) | def test_dataset_wrapper():

FILE: tests/test_data/test_datasets/test_xml_dataset.py
  function test_xml_dataset (line 6) | def test_xml_dataset():

FILE: tests/test_data/test_pipelines/test_formatting.py
  function test_default_format_bundle (line 8) | def test_default_format_bundle():

FILE: tests/test_data/test_pipelines/test_loading.py
  class TestLoading (line 11) | class TestLoading(object):
    method setup_class (line 14) | def setup_class(cls):
    method test_load_img (line 17) | def test_load_img(self):
    method test_load_multi_channel_img (line 59) | def test_load_multi_channel_img(self):
    method test_load_webcam_img (line 80) | def test_load_webcam_img(self):

FILE: tests/test_data/test_pipelines/test_sampler.py
  function test_random_sampler (line 8) | def test_random_sampler():
  function test_random_sampler_empty_gt (line 44) | def test_random_sampler_empty_gt():
  function test_random_sampler_empty_pred (line 70) | def test_random_sampler_empty_pred():
  function _context_for_ohem (line 94) | def _context_for_ohem():
  function test_ohem_sampler (line 109) | def test_ohem_sampler():
  function test_ohem_sampler_empty_gt (line 154) | def test_ohem_sampler_empty_gt():
  function test_ohem_sampler_empty_pred (line 195) | def test_ohem_sampler_empty_pred():
  function test_random_sample_result (line 235) | def test_random_sample_result():
  function test_score_hlr_sampler_empty_pred (line 249) | def test_score_hlr_sampler_empty_pred():

FILE: tests/test_data/test_pipelines/test_transform/test_img_augment.py
  function construct_toy_data (line 12) | def construct_toy_data(poly2mask=True):
  function test_adjust_color (line 42) | def test_adjust_color():
  function test_imequalize (line 73) | def test_imequalize(nb_rand_test=100):
  function test_adjust_brightness (line 108) | def test_adjust_brightness(nb_rand_test=100):
  function test_adjust_contrast (line 153) | def test_adjust_contrast(nb_rand_test=100):

FILE: tests/test_data/test_pipelines/test_transform/test_models_aug_test.py
  function model_aug_test_template (line 12) | def model_aug_test_template(cfg_file):
  function test_aug_test_size (line 44) | def test_aug_test_size():
  function test_cascade_rcnn_aug_test (line 69) | def test_cascade_rcnn_aug_test():
  function test_mask_rcnn_aug_test (line 75) | def test_mask_rcnn_aug_test():
  function test_htc_aug_test (line 83) | def test_htc_aug_test():
  function test_scnet_aug_test (line 90) | def test_scnet_aug_test():
  function test_cornernet_aug_test (line 98) | def test_cornernet_aug_test():

FILE: tests/test_data/test_pipelines/test_transform/test_rotate.py
  function construct_toy_data (line 11) | def construct_toy_data(poly2mask=True):
  function _check_fields (line 41) | def _check_fields(results, results_rotated, keys):
  function check_rotate (line 50) | def check_rotate(results, results_rotated):
  function test_rotate (line 65) | def test_rotate():

FILE: tests/test_data/test_pipelines/test_transform/test_shear.py
  function construct_toy_data (line 11) | def construct_toy_data(poly2mask=True):
  function _check_fields (line 42) | def _check_fields(results, results_sheared, keys):
  function check_shear (line 51) | def check_shear(results, results_sheared):
  function test_shear (line 67) | def test_shear():

FILE: tests/test_data/test_pipelines/test_transform/test_transform.py
  function test_resize (line 14) | def test_resize():
  function test_flip (line 82) | def test_flip():
  function test_random_crop (line 183) | def test_random_crop():
  function test_min_iou_random_crop (line 306) | def test_min_iou_random_crop():
  function test_pad (line 356) | def test_pad():
  function test_normalize (line 396) | def test_normalize():
  function test_albu_transform (line 425) | def test_albu_transform():
  function test_random_center_crop_pad (line 449) | def test_random_center_crop_pad():
  function test_multi_scale_flip_aug (line 595) | def test_multi_scale_flip_aug():
  function test_cutout (line 689) | def test_cutout():
  function test_random_shift (line 755) | def test_random_shift():

FILE: tests/test_data/test_pipelines/test_transform/test_translate.py
  function _check_keys (line 12) | def _check_keys(results, results_translated):
  function _pad (line 19) | def _pad(h, w, c, pad_val, axis=-1, dtype=np.float32):
  function _construct_img (line 29) | def _construct_img(results):
  function _construct_ann_info (line 39) | def _construct_ann_info(h=427, w=640, c=3):
  function _load_bboxes (line 75) | def _load_bboxes(results):
  function _load_labels (line 85) | def _load_labels(results):
  function _poly2mask (line 89) | def _poly2mask(mask_ann, img_h, img_w):
  function _process_polygons (line 105) | def _process_polygons(polygons):
  function _load_masks (line 114) | def _load_masks(results, poly2mask=True):
  function _construct_semantic_seg (line 127) | def _construct_semantic_seg(results):
  function construct_toy_data (line 134) | def construct_toy_data(poly2mask=True):
  function test_translate (line 148) | def test_translate():

FILE: tests/test_data/test_utils.py
  function test_replace_ImageToTensor (line 6) | def test_replace_ImageToTensor():
  function test_get_loading_pipeline (line 64) | def test_get_loading_pipeline():

FILE: tests/test_metrics/test_box_overlap.py
  function test_bbox_overlaps_2d (line 8) | def test_bbox_overlaps_2d(eps=1e-7):

FILE: tests/test_metrics/test_losses.py
  function test_ce_loss (line 7) | def test_ce_loss():
  function test_varifocal_loss (line 34) | def test_varifocal_loss():
  function test_kd_loss (line 81) | def test_kd_loss():
  function test_accuracy (line 112) | def test_accuracy():

FILE: tests/test_models/test_backbones/test_hourglass.py
  function test_hourglass_backbone (line 7) | def test_hourglass_backbone():

FILE: tests/test_models/test_backbones/test_regnet.py
  function test_regnet_backbone (line 35) | def test_regnet_backbone(arch_name, arch, out_channels):

FILE: tests/test_models/test_backbones/test_renext.py
  function test_renext_bottleneck (line 9) | def test_renext_bottleneck():
  function test_resnext_backbone (line 58) | def test_resnext_backbone():

FILE: tests/test_models/test_backbones/test_res2net.py
  function test_res2net_bottle2neck (line 9) | def test_res2net_bottle2neck():
  function test_res2net_backbone (line 43) | def test_res2net_backbone():

FILE: tests/test_models/test_backbones/test_resnest.py
  function test_resnest_bottleneck (line 8) | def test_resnest_bottleneck():
  function test_resnest_backbone (line 26) | def test_resnest_backbone():

FILE: tests/test_models/test_backbones/test_resnet.py
  function test_resnet_basic_block (line 14) | def test_resnet_basic_block():
  function test_resnet_bottleneck (line 63) | def test_resnet_bottleneck():
  function test_simplied_basic_block (line 197) | def test_simplied_basic_block():
  function test_resnet_res_layer (line 250) | def test_resnet_res_layer():
  function test_resnest_stem (line 306) | def test_resnest_stem():
  function test_resnet_backbone (line 354) | def test_resnet_backbone():

FILE: tests/test_models/test_backbones/test_trident_resnet.py
  function test_trident_resnet_bottleneck (line 8) | def test_trident_resnet_bottleneck():
  function test_trident_resnet_backbone (line 155) | def test_trident_resnet_backbone():

FILE: tests/test_models/test_backbones/utils.py
  function is_block (line 10) | def is_block(modules):
  function is_norm (line 18) | def is_norm(modules):
  function check_norm_state (line 25) | def check_norm_state(modules, train_state):

FILE: tests/test_models/test_dense_heads/test_anchor_head.py
  function test_anchor_head_loss (line 7) | def test_anchor_head_loss():

FILE: tests/test_models/test_dense_heads/test_atss_head.py
  function test_atss_head_loss (line 7) | def test_atss_head_loss():

FILE: tests/test_models/test_dense_heads/test_autoassign_head.py
  function test_autoassign_head_loss (line 8) | def test_autoassign_head_loss():

FILE: tests/test_models/test_dense_heads/test_corner_head.py
  function test_corner_head_loss (line 7) | def test_corner_head_loss():
  function test_corner_head_encode_and_decode_heatmap (line 95) | def test_corner_head_encode_and_decode_heatmap():

FILE: tests/test_models/test_dense_heads/test_detr_head.py
  function test_detr_head_loss (line 7) | def test_detr_head_loss():

FILE: tests/test_models/test_dense_heads/test_fcos_head.py
  function test_fcos_head_loss (line 7) | def test_fcos_head_loss():

FILE: tests/test_models/test_dense_heads/test_fsaf_head.py
  function test_fsaf_head_loss (line 7) | def test_fsaf_head_loss():

FILE: tests/test_models/test_dense_heads/test_ga_anchor_head.py
  function test_ga_anchor_head_loss (line 7) | def test_ga_anchor_head_loss():

FILE: tests/test_models/test_dense_heads/test_gfl_head.py
  function test_gfl_head_loss (line 7) | def test_gfl_head_loss():

FILE: tests/test_models/test_dense_heads/test_ld_head.py
  function test_ld_head_loss (line 7) | def test_ld_head_loss():

FILE: tests/test_models/test_dense_heads/test_paa_head.py
  function test_paa_head_loss (line 9) | def test_paa_head_loss():

FILE: tests/test_models/test_dense_heads/test_pisa_head.py
  function test_pisa_retinanet_head_loss (line 8) | def test_pisa_retinanet_head_loss():
  function test_pisa_ssd_head_loss (line 75) | def test_pisa_ssd_head_loss():
  function test_pisa_roi_head_loss (line 150) | def test_pisa_roi_head_loss():

FILE: tests/test_models/test_dense_heads/test_sabl_retina_head.py
  function test_sabl_retina_head_loss (line 7) | def test_sabl_retina_head_loss():

FILE: tests/test_models/test_dense_heads/test_vfnet_head.py
  function test_vfnet_head_loss (line 7) | def test_vfnet_head_loss():

FILE: tests/test_models/test_dense_heads/test_yolact_head.py
  function test_yolact_head_loss (line 7) | def test_yolact_head_loss():

FILE: tests/test_models/test_dense_heads/test_yolof_head.py
  function test_yolof_head_loss (line 7) | def test_yolof_head_loss():

FILE: tests/test_models/test_forward.py
  function _get_config_directory (line 10) | def _get_config_directory():
  function _get_config_module (line 25) | def _get_config_module(fname):
  function _get_detector_cfg (line 34) | def _get_detector_cfg(fname):
  function test_sparse_rcnn_forward (line 45) | def test_sparse_rcnn_forward():
  function test_rpn_forward (line 106) | def test_rpn_forward():
  function test_single_stage_forward_gpu (line 148) | def test_single_stage_forward_gpu(cfg_file):
  function test_faster_rcnn_ohem_forward (line 188) | def test_faster_rcnn_ohem_forward():
  function test_two_stage_forward (line 239) | def test_two_stage_forward(cfg_file):
  function test_single_stage_forward_cpu (line 293) | def test_single_stage_forward_cpu(cfg_file):
  function _demo_mm_inputs (line 327) | def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
  function test_yolact_forward (line 405) | def test_yolact_forward():
  function test_detr_forward (line 444) | def test_detr_forward():
  function test_kd_single_stage_forward (line 499) | def test_kd_single_stage_forward():
  function test_inference_detector (line 554) | def test_inference_detector():

FILE: tests/test_models/test_loss.py
  function test_iou_type_loss_zeros_weight (line 10) | def test_iou_type_loss_zeros_weight(loss_class):

FILE: tests/test_models/test_necks.py
  function test_fpn (line 8) | def test_fpn():
  function test_channel_mapper (line 204) | def test_channel_mapper():
  function test_dilated_encoder (line 241) | def test_dilated_encoder():

FILE: tests/test_models/test_roi_heads/test_bbox_head.py
  function test_bbox_head_loss (line 10) | def test_bbox_head_loss():
  function test_bbox_head_get_bboxes (line 69) | def test_bbox_head_get_bboxes(num_sample, num_batch):
  function test_refine_boxes (line 93) | def test_refine_boxes():
  function _demodata_refine_boxes (line 216) | def _demodata_refine_boxes(n_roi, n_img, rng=0):

FILE: tests/test_models/test_roi_heads/test_mask_head.py
  function test_mask_head_loss (line 8) | def test_mask_head_loss():

FILE: tests/test_models/test_roi_heads/test_roi_extractor.py
  function test_groie (line 7) | def test_groie():

FILE: tests/test_models/test_roi_heads/test_sabl_bbox_head.py
  function test_sabl_bbox_head_loss (line 9) | def test_sabl_bbox_head_loss():

FILE: tests/test_models/test_roi_heads/utils.py
  function _dummy_bbox_sampling (line 6) | def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):

FILE: tests/test_models/test_utils/test_position_encoding.py
  function test_sine_positional_encoding (line 8) | def test_sine_positional_encoding(num_feats=16, batch_size=2):
  function test_learned_positional_encoding (line 28) | def test_learned_positional_encoding(num_feats=16,

FILE: tests/test_models/test_utils/test_transformer.py
  function test_detr_transformer_dencoder_encoder_layer (line 9) | def test_detr_transformer_dencoder_encoder_layer():
  function test_transformer (line 75) | def test_transformer():

FILE: tests/test_onnx/test_head.py
  function retinanet_config (line 22) | def retinanet_config():
  function test_retina_head_forward_single (line 53) | def test_retina_head_forward_single():
  function test_retina_head_forward (line 62) | def test_retina_head_forward():
  function test_retinanet_head_get_bboxes (line 75) | def test_retinanet_head_get_bboxes():
  function yolo_config (line 103) | def yolo_config():
  function test_yolov3_head_forward (line 135) | def test_yolov3_head_forward():
  function test_yolov3_head_get_bboxes (line 147) | def test_yolov3_head_get_bboxes():
  function fcos_config (line 170) | def fcos_config():
  function test_fcos_head_forward_single (line 186) | def test_fcos_head_forward_single():
  function test_fcos_head_forward (line 198) | def test_fcos_head_forward():
  function test_fcos_head_get_bboxes (line 209) | def test_fcos_head_get_bboxes():
  function fsaf_config (line 238) | def fsaf_config():
  function test_fsaf_head_forward_single (line 261) | def test_fsaf_head_forward_single():
  function test_fsaf_head_forward (line 269) | def test_fsaf_head_forward():
  function test_fsaf_head_get_bboxes (line 281) | def test_fsaf_head_get_bboxes():
  function ssd_config (line 309) | def ssd_config():
  function test_ssd_head_forward (line 342) | def test_ssd_head_forward():
  function test_ssd_head_get_bboxes (line 355) | def test_ssd_head_get_bboxes():

FILE: tests/test_onnx/test_neck.py
  function fpn_neck_config (line 34) | def fpn_neck_config(test_step_name):
  function yolo_neck_config (line 102) | def yolo_neck_config(test_step_name):
  function test_fpn_normal (line 120) | def test_fpn_normal():
  function test_fpn_wo_extra_convs (line 125) | def test_fpn_wo_extra_convs():
  function test_fpn_lateral_bns (line 130) | def test_fpn_lateral_bns():
  function test_fpn_bilinear_upsample (line 135) | def test_fpn_bilinear_upsample():
  function test_fpn_scale_factor (line 140) | def test_fpn_scale_factor():
  function test_fpn_extra_convs_inputs (line 145) | def test_fpn_extra_convs_inputs():
  function test_fpn_extra_convs_laterals (line 150) | def test_fpn_extra_convs_laterals():
  function test_fpn_extra_convs_outputs (line 155) | def test_fpn_extra_convs_outputs():
  function test_yolo_normal (line 160) | def test_yolo_normal():

FILE: tests/test_onnx/utils.py
  class WrapFunction (line 20) | class WrapFunction(nn.Module):
    method __init__ (line 23) | def __init__(self, wrapped_function):
    method forward (line 27) | def forward(self, *args, **kwargs):
  function ort_validate (line 31) | def ort_validate(model, feats, onnx_io='tmp.onnx'):
  function get_ort_model_output (line 87) | def get_ort_model_output(feat, onnx_io='tmp.onnx'):
  function convert_result_list (line 117) | def convert_result_list(outputs):

FILE: tests/test_runtime/async_benchmark.py
  function main (line 15) | async def main():

FILE: tests/test_runtime/test_async.py
  class AsyncTestCase (line 17) | class AsyncTestCase(asynctest.TestCase):
    method _run_test_method (line 23) | def _run_test_method(self, method):
  class MaskRCNNDetector (line 30) | class MaskRCNNDetector:
    method __init__ (line 32) | def __init__(self,
    method init (line 45) | async def init(self):
    method apredict (line 53) | async def apredict(self, img):
  class AsyncInferenceTestCase (line 61) | class AsyncInferenceTestCase(AsyncTestCase):
    method test_simple_inference (line 65) | async def test_simple_inference(self):

FILE: tests/test_runtime/test_config.py
  function _get_config_directory (line 13) | def _get_config_directory():
  function _check_numclasscheckhook (line 29) | def _check_numclasscheckhook(detector, config_mod):
  function test_config_build_detector (line 64) | def test_config_build_detector():
  function _check_roi_head (line 115) | def _check_roi_head(config, head):
  function _check_roi_extractor (line 168) | def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
  function _check_mask_head (line 196) | def _check_mask_head(mask_cfg, mask_head):
  function _check_bbox_head (line 219) | def _check_bbox_head(bbox_cfg, bbox_head):
  function _check_anchorhead (line 265) | def _check_anchorhead(config, head):
  function test_config_data_pipeline (line 302) | def test_config_data_pipeline(config_rpath):

FILE: tests/test_runtime/test_eval_hook.py
  class ExampleDataset (line 17) | class ExampleDataset(Dataset):
    method __init__ (line 19) | def __init__(self):
    method __getitem__ (line 23) | def __getitem__(self, idx):
    method __len__ (line 27) | def __len__(self):
    method evaluate (line 31) | def evaluate(self, results, logger=None):
  class EvalDataset (line 35) | class EvalDataset(ExampleDataset):
    method evaluate (line 37) | def evaluate(self, results, logger=None):
  class ExampleModel (line 44) | class ExampleModel(nn.Module):
    method __init__ (line 46) | def __init__(self):
    method forward (line 51) | def forward(self, imgs, rescale=False, return_loss=False):
    method train_step (line 54) | def train_step(self, data_batch, optimizer, **kwargs):
  function test_eval_hook (line 70) | def test_eval_hook(EvalHookCls):

FILE: tests/test_runtime/test_fp16.py
  function test_cast_tensor_type (line 9) | def test_cast_tensor_type():
  function test_auto_fp16 (line 51) | def test_auto_fp16():
  function test_force_fp32 (line 177) | def test_force_fp32():

FILE: tests/test_utils/test_anchor.py
  function test_standard_anchor_generator (line 10) | def test_standard_anchor_generator():
  function test_strides (line 22) | def test_strides():
  function test_ssd_anchor_generator (line 43) | def test_ssd_anchor_generator():
  function test_anchor_generator_with_tuples (line 113) | def test_anchor_generator_with_tuples():
  function test_yolo_anchor_generator (line 148) | def test_yolo_anchor_generator():
  function test_retina_anchor (line 191) | def test_retina_anchor():
  function test_guided_anchor (line 290) | def test_guided_anchor():

FILE: tests/test_utils/test_assigner.py
  function test_max_iou_assigner (line 15) | def test_max_iou_assigner():
  function test_max_iou_assigner_with_ignore (line 39) | def test_max_iou_assigner_with_ignore():
  function test_max_iou_assigner_with_empty_gt (line 66) | def test_max_iou_assigner_with_empty_gt():
  function test_max_iou_assigner_with_empty_boxes (line 85) | def test_max_iou_assigner_with_empty_boxes():
  function test_max_iou_assigner_with_empty_boxes_and_ignore (line 109) | def test_max_iou_assigner_with_empty_boxes_and_ignore():
  function test_max_iou_assigner_with_empty_boxes_and_gt (line 143) | def test_max_iou_assigner_with_empty_boxes_and_gt():
  function test_point_assigner (line 155) | def test_point_assigner():
  function test_point_assigner_with_empty_gt (line 172) | def test_point_assigner_with_empty_gt():
  function test_point_assigner_with_empty_boxes_and_gt (line 188) | def test_point_assigner_with_empty_boxes_and_gt():
  function test_approx_iou_assigner (line 197) | def test_approx_iou_assigner():
  function test_approx_iou_assigner_with_empty_gt (line 222) | def test_approx_iou_assigner_with_empty_gt():
  function test_approx_iou_assigner_with_empty_boxes (line 245) | def test_approx_iou_assigner_with_empty_boxes():
  function test_approx_iou_assigner_with_empty_boxes_and_gt (line 264) | def test_approx_iou_assigner_with_empty_boxes_and_gt():
  function test_random_assign_result (line 280) | def test_random_assign_result():
  function test_center_region_assigner (line 294) | def test_center_region_assigner():
  function test_center_region_assigner_with_ignore (line 320) | def test_center_region_assigner_with_ignore():
  function test_center_region_assigner_with_empty_bboxes (line 349) | def test_center_region_assigner_with_empty_bboxes():
  function test_center_region_assigner_with_empty_gts (line 365) | def test_center_region_assigner_with_empty_gts():
  function test_hungarian_match_assigner (line 382) | def test_hungarian_match_assigner():
  function test_uniform_assigner (line 428) | def test_uniform_assigner():
  function test_uniform_assigner_with_empty_gt (line 456) | def test_uniform_assigner_with_empty_gt():
  function test_uniform_assigner_with_empty_boxes (line 478) | def test_uniform_assigner_with_empty_boxes():

FILE: tests/test_utils/test_coder.py
  function test_yolo_bbox_coder (line 8) | def test_yolo_bbox_coder():
  function test_delta_bbox_coder (line 26) | def test_delta_bbox_coder():
  function test_tblr_bbox_coder (line 77) | def test_tblr_bbox_coder():

FILE: tests/test_utils/test_masks.py
  function dummy_raw_bitmap_masks (line 8) | def dummy_raw_bitmap_masks(size):
  function dummy_raw_polygon_masks (line 19) | def dummy_raw_polygon_masks(size):
  function dummy_bboxes (line 35) | def dummy_bboxes(num, max_height, max_width):
  function test_bitmap_mask_init (line 42) | def test_bitmap_mask_init():
  function test_bitmap_mask_rescale (line 77) | def test_bitmap_mask_rescale():
  function test_bitmap_mask_resize (line 98) | def test_bitmap_mask_resize():
  function test_bitmap_mask_flip (line 132) | def test_bitmap_mask_flip():
  function test_bitmap_mask_pad (line 173) | def test_bitmap_mask_pad():
  function test_bitmap_mask_crop (line 192) | def test_bitmap_mask_crop():
  function test_bitmap_mask_crop_and_resize (line 218) | def test_bitmap_mask_crop_and_resize():
  function test_bitmap_mask_expand (line 241) | def test_bitmap_mask_expand():
  function test_bitmap_mask_area (line 261) | def test_bitmap_mask_area():
  function test_bitmap_mask_to_ndarray (line 275) | def test_bitmap_mask_to_ndarray():
  function test_bitmap_mask_to_tensor (line 292) | def test_bitmap_mask_to_tensor():
  function test_bitmap_mask_index (line 309) | def test_bitmap_mask_index():
  function test_bitmap_mask_iter (line 316) | def test_bitmap_mask_iter():
  function test_polygon_mask_init (line 324) | def test_polygon_mask_init():
  function test_polygon_mask_rescale (line 352) | def test_polygon_mask_rescale():
  function test_polygon_mask_resize (line 380) | def test_polygon_mask_resize():
  function test_polygon_mask_flip (line 449) | def test_polygon_mask_flip():
  function test_polygon_mask_crop (line 497) | def test_polygon_mask_crop():
  function test_polygon_mask_pad (line 526) | def test_polygon_mask_pad():
  function test_polygon_mask_expand (line 547) | def test_polygon_mask_expand():
  function test_polygon_mask_crop_and_resize (line 554) | def test_polygon_mask_crop_and_resize():
  function test_polygon_mask_area (line 579) | def test_polygon_mask_area():
  function test_polygon_mask_to_bitmap (line 596) | def test_polygon_mask_to_bitmap():
  function test_polygon_mask_to_ndarray (line 604) | def test_polygon_mask_to_ndarray():
  function test_polygon_to_tensor (line 620) | def test_polygon_to_tensor():
  function test_polygon_mask_index (line 637) | def test_polygon_mask_index():
  function test_polygon_mask_iter (line 651) | def test_polygon_mask_iter():

FILE: tests/test_utils/test_misc.py
  function dummy_raw_polygon_masks (line 10) | def dummy_raw_polygon_masks(size):
  function test_mask2ndarray (line 26) | def test_mask2ndarray():
  function test_distance2bbox (line 51) | def test_distance2bbox():

FILE: tests/test_utils/test_version.py
  function test_version_check (line 4) | def test_version_check():

FILE: tests/test_utils/test_visualization.py
  function test_color (line 14) | def test_color():
  function test_imshow_det_bboxes (line 31) | def test_imshow_det_bboxes():
  function test_imshow_gt_det_bboxes (line 80) | def test_imshow_gt_det_bboxes():

FILE: tools/analysis_tools/analyze_logs.py
  function cal_train_time (line 10) | def cal_train_time(log_dicts, args):
  function plot_curve (line 33) | def plot_curve(log_dicts, args):
  function add_plot_parser (line 92) | def add_plot_parser(subparsers):
  function add_time_parser (line 120) | def add_time_parser(subparsers):
  function parse_args (line 136) | def parse_args():
  function load_json_logs (line 146) | def load_json_logs(json_logs):
  function main (line 166) | def main():

FILE: tools/analysis_tools/analyze_results.py
  function bbox_map_eval (line 13) | def bbox_map_eval(det_result, annotation):
  class ResultVisualizer (line 48) | class ResultVisualizer(object):
    method __init__ (line 58) | def __init__(self, show=False, wait_time=0, score_thr=0):
    method _save_image_gts_results (line 63) | def _save_image_gts_results(self, dataset, results, mAPs, out_dir=None):
    method evaluate_and_show (line 89) | def evaluate_and_show(self,
  function parse_args (line 137) | def parse_args():
  function main (line 176) | def main():

FILE: tools/analysis_tools/benchmark.py
  function parse_args (line 15) | def parse_args():
  function main (line 40) | def main():

FILE: tools/analysis_tools/coco_error_analysis.py
  function makeplot (line 12) | def makeplot(rs, ps, outDir, class_name, iou_type):
  function autolabel (line 53) | def autolabel(ax, rects):
  function makebarplot (line 72) | def makebarplot(rs, ps, outDir, class_name, iou_type):
  function get_gt_area_group_numbers (line 107) | def get_gt_area_group_numbers(cocoEval):
  function make_gt_area_group_numbers_plot (line 122) | def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True):
  function make_gt_area_histogram_plot (line 151) | def make_gt_area_histogram_plot(cocoEval, outDir):
  function analyze_individual_category (line 173) | def analyze_individual_category(k,
  function analyze_results (line 234) | def analyze_results(res_file,
  function main (line 307) | def main():

FILE: tools/analysis_tools/eval_metric.py
  function parse_args (line 9) | def parse_args():
  function main (line 46) | def main():

FILE: tools/analysis_tools/get_flops.py
  function parse_args (line 14) | def parse_args():
  function main (line 37) | def main():

FILE: tools/analysis_tools/robustness_eval.py
  function print_coco_results (line 8) | def print_coco_results(results):
  function get_coco_style_results (line 34) | def get_coco_style_results(filename,
  function get_voc_style_results (line 111) | def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
  function get_results (line 155) | def get_results(filename,
  function get_distortions_from_file (line 183) | def get_distortions_from_file(filename):
  function get_distortions_from_results (line 190) | def get_distortions_from_results(eval_output):
  function main (line 197) | def main():

FILE: tools/analysis_tools/test_robustness.py
  function coco_eval_with_return (line 23) | def coco_eval_with_return(result_files,
  function voc_eval_with_return (line 65) | def voc_eval_with_return(result_file,
  function parse_args (line 92) | def parse_args():
  function main (line 180) | def main():

FILE: tools/dataset_converters/cityscapes.py
  function collect_files (line 11) | def collect_files(img_dir, gt_dir):
  function collect_annotations (line 28) | def collect_annotations(files, nproc=1):
  function load_img_info (line 39) | def load_img_info(files):
  function cvt_annotations (line 84) | def cvt_annotations(image_infos, out_json_name):
  function parse_args (line 113) | def parse_args():
  function main (line 126) | def main():

FILE: tools/dataset_converters/pascal_voc.py
  function parse_xml (line 13) | def parse_xml(args):
  function cvt_annotations (line 67) | def cvt_annotations(devkit_path, years, split, out_file):
  function cvt_to_coco_json (line 95) | def cvt_to_coco_json(annotations):
  function parse_args (line 182) | def parse_args():
  function main (line 196) | def main():

FILE: tools/deployment/mmdet2torchserve.py
  function mmdet2torchserve (line 14) | def mmdet2torchserve(
  function parse_args (line 69) | def parse_args():

FILE: tools/deployment/mmdet_handler.py
  class MMdetHandler (line 11) | class MMdetHandler(BaseHandler):
    method initialize (line 14) | def initialize(self, context):
    method preprocess (line 30) | def preprocess(self, data):
    method inference (line 42) | def inference(self, data, *args, **kwargs):
    method postprocess (line 46) | def postprocess(self, data):

FILE: tools/deployment/onnx2tensorrt.py
  function get_GiB (line 18) | def get_GiB(x: int):
  function onnx2tensorrt (line 23) | def onnx2tensorrt(onnx_file,
  function parse_args (line 117) | def parse_args():

FILE: tools/deployment/pytorch2onnx.py
  function pytorch2onnx (line 16) | def pytorch2onnx(config_path,
  function parse_args (line 179) | def parse_args():

FILE: tools/deployment/test.py
  function parse_args (line 13) | def parse_args():
  function main (line 60) | def main():

FILE: tools/misc/browse_dataset.py
  function parse_args (line 13) | def parse_args():
  function retrieve_data_cfg (line 47) | def retrieve_data_cfg(config_path, skip_type, cfg_options):
  function main (line 63) | def main():

FILE: tools/misc/print_config.py
  function parse_args (line 7) | def parse_args():
  function main (line 40) | def main():

FILE: tools/model_converters/detectron2pytorch.py
  function convert_bn (line 10) | def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
  function convert_conv_fc (line 23) | def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
  function convert (line 34) | def convert(src, dst, depth):
  function main (line 72) | def main():

FILE: tools/model_converters/publish_model.py
  function parse_args (line 7) | def parse_args():
  function process_checkpoint (line 16) | def process_checkpoint(in_file, out_file):
  function main (line 33) | def main():

FILE: tools/model_converters/regnet2mmdet.py
  function convert_stem (line 7) | def convert_stem(model_key, model_weight, state_dict, converted_names):
  function convert_head (line 15) | def convert_head(model_key, model_weight, state_dict, converted_names):
  function convert_reslayer (line 22) | def convert_reslayer(model_key, model_weight, state_dict, converted_names):
  function convert (line 54) | def convert(src, dst):
  function main (line 80) | def main():

FILE: tools/model_converters/upgrade_model_version.py
  function is_head (line 10) | def is_head(key):
  function parse_config (line 18) | def parse_config(config_strings):
  function reorder_cls_channel (line 45) | def reorder_cls_channel(val, num_classes=81):
  function truncate_cls_channel (line 67) | def truncate_cls_channel(val, num_classes=81):
  function truncate_reg_channel (line 89) | def truncate_reg_channel(val, num_classes=81):
  function convert (line 114) | def convert(in_file, out_file, num_classes):
  function main (line 195) | def main():

FILE: tools/test.py
  function parse_args (line 19) | def parse_args():
  function main (line 101) | def main():

FILE: tools/train.py
  function parse_args (line 21) | def parse_args():
  function main (line 86) | def main():
Copy disabled (too large) Download .json
Condensed preview — 967 files, each showing path, character count, and a content snippet. Download the .json file for the full structured content (13,520K chars).
[
  {
    "path": ".gitignore",
    "chars": 21,
    "preview": "**/.DS_Store\n.vscode\n"
  },
  {
    "path": "LICENSE",
    "chars": 1103,
    "preview": "MIT License\n\nCopyright (c) 2022 Multimedia Computing Group, Nanjing University\n\nPermission is hereby granted, free of ch"
  },
  {
    "path": "Makefile",
    "chars": 822,
    "preview": "adamixer-r50:\n\tCUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 PORT=29502 ./tools/dist_train.sh \\\n\tconfigs/adamixer/adamixer_r50_1x"
  },
  {
    "path": "README.md",
    "chars": 14658,
    "preview": "# AdaMixer: A Fast-Converging Query-Based Object Detector [arxiv](https://arxiv.org/abs/2203.16507)\n\n> [**AdaMixer: A Fa"
  },
  {
    "path": "README_zh-CN.md",
    "chars": 7304,
    "preview": "<div align=\"center\">\n  <img src=\"resources/mmdet-logo.png\" width=\"600\"/>\n</div>\n\n**新闻**: 我们在 [ArXiv](https://arxiv.org/a"
  },
  {
    "path": "configs/_base_/datasets/cityscapes_detection.py",
    "chars": 1937,
    "preview": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n    mean=[123."
  },
  {
    "path": "configs/_base_/datasets/cityscapes_instance.py",
    "chars": 1963,
    "preview": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n    mean=[123."
  },
  {
    "path": "configs/_base_/datasets/coco_detection.py",
    "chars": 1914,
    "preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28,"
  },
  {
    "path": "configs/_base_/datasets/coco_detection_tiny.py",
    "chars": 1984,
    "preview": "dataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.3"
  },
  {
    "path": "configs/_base_/datasets/coco_instance.py",
    "chars": 1737,
    "preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28,"
  },
  {
    "path": "configs/_base_/datasets/coco_instance_semantic.py",
    "chars": 1922,
    "preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28,"
  },
  {
    "path": "configs/_base_/datasets/deepfashion.py",
    "chars": 1888,
    "preview": "# dataset settings\ndataset_type = 'DeepFashionDataset'\ndata_root = 'data/DeepFashion/In-shop/'\nimg_norm_cfg = dict(\n    "
  },
  {
    "path": "configs/_base_/datasets/lvis_v0.5_instance.py",
    "chars": 786,
    "preview": "# dataset settings\n_base_ = 'coco_instance.py'\ndataset_type = 'LVISV05Dataset'\ndata_root = 'data/lvis_v0.5/'\ndata = dict"
  },
  {
    "path": "configs/_base_/datasets/lvis_v1_instance.py",
    "chars": 736,
    "preview": "# dataset settings\n_base_ = 'coco_instance.py'\ndataset_type = 'LVISV1Dataset'\ndata_root = 'data/lvis_v1/'\ndata = dict(\n "
  },
  {
    "path": "configs/_base_/datasets/voc0712.py",
    "chars": 1916,
    "preview": "# dataset settings\ndataset_type = 'VOCDataset'\ndata_root = 'data/VOCdevkit/'\nimg_norm_cfg = dict(\n    mean=[123.675, 116"
  },
  {
    "path": "configs/_base_/datasets/wider_face.py",
    "chars": 2011,
    "preview": "# dataset settings\ndataset_type = 'WIDERFaceDataset'\ndata_root = 'data/WIDERFace/'\nimg_norm_cfg = dict(mean=[123.675, 11"
  },
  {
    "path": "configs/_base_/default_runtime.py",
    "chars": 368,
    "preview": "checkpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n    interval=50,\n    hooks=[\n        dict(type='T"
  },
  {
    "path": "configs/_base_/models/cascade_mask_rcnn_r50_fpn.py",
    "chars": 6912,
    "preview": "# model settings\nmodel = dict(\n    type='CascadeRCNN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n     "
  },
  {
    "path": "configs/_base_/models/cascade_rcnn_r50_fpn.py",
    "chars": 6287,
    "preview": "# model settings\nmodel = dict(\n    type='CascadeRCNN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n     "
  },
  {
    "path": "configs/_base_/models/fast_rcnn_r50_fpn.py",
    "chars": 2022,
    "preview": "# model settings\nmodel = dict(\n    type='FastRCNN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n        "
  },
  {
    "path": "configs/_base_/models/faster_rcnn_r50_caffe_c4.py",
    "chars": 3631,
    "preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='FasterRCNN',\n    pretrained='op"
  },
  {
    "path": "configs/_base_/models/faster_rcnn_r50_caffe_dc5.py",
    "chars": 3416,
    "preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='FasterRCNN',\n    pretrained='op"
  },
  {
    "path": "configs/_base_/models/faster_rcnn_r50_fpn.py",
    "chars": 3594,
    "preview": "# model settings\nmodel = dict(\n    type='FasterRCNN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n      "
  },
  {
    "path": "configs/_base_/models/mask_rcnn_r50_caffe_c4.py",
    "chars": 3998,
    "preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n    type='MaskRCNN',\n    pretrained='open"
  },
  {
    "path": "configs/_base_/models/mask_rcnn_r50_fpn.py",
    "chars": 4016,
    "preview": "# model settings\nmodel = dict(\n    type='MaskRCNN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n        "
  },
  {
    "path": "configs/_base_/models/retinanet_r50_fpn.py",
    "chars": 1729,
    "preview": "# model settings\nmodel = dict(\n    type='RetinaNet',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n       "
  },
  {
    "path": "configs/_base_/models/rpn_r50_caffe_c4.py",
    "chars": 1725,
    "preview": "# model settings\nmodel = dict(\n    type='RPN',\n    pretrained='open-mmlab://detectron2/resnet50_caffe',\n    backbone=dic"
  },
  {
    "path": "configs/_base_/models/rpn_r50_fpn.py",
    "chars": 1769,
    "preview": "# model settings\nmodel = dict(\n    type='RPN',\n    pretrained='torchvision://resnet50',\n    backbone=dict(\n        type="
  },
  {
    "path": "configs/_base_/models/ssd300.py",
    "chars": 1529,
    "preview": "# model settings\ninput_size = 300\nmodel = dict(\n    type='SingleStageDetector',\n    pretrained='open-mmlab://vgg16_caffe"
  },
  {
    "path": "configs/_base_/schedules/schedule_1x.py",
    "chars": 319,
    "preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
  },
  {
    "path": "configs/_base_/schedules/schedule_20e.py",
    "chars": 320,
    "preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
  },
  {
    "path": "configs/_base_/schedules/schedule_2x.py",
    "chars": 320,
    "preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
  },
  {
    "path": "configs/adamixer/README.md",
    "chars": 0,
    "preview": ""
  },
  {
    "path": "configs/adamixer/adamixer_dx101_300_query_crop_mstrain_480-800_3x_coco.py",
    "chars": 518,
    "preview": "_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'\n\nmodel = dict(\n    pretrained='open-mmlab://resnext1"
  },
  {
    "path": "configs/adamixer/adamixer_r101_300_query_crop_mstrain_480-800_3x_coco.py",
    "chars": 146,
    "preview": "_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'\n\nmodel = dict(pretrained='torchvision://resnet101', "
  },
  {
    "path": "configs/adamixer/adamixer_r101_mstrain_480-800_3x_coco.py",
    "chars": 131,
    "preview": "_base_ = './adamixer_r50_mstrain_480-800_3x_coco.py'\n\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(d"
  },
  {
    "path": "configs/adamixer/adamixer_r50_1x_coco.py",
    "chars": 5997,
    "preview": "def __get_debug():\n    import os\n    return 'C_DEBUG' in os.environ\n\n\ndebug = __get_debug()\n\nlog_interval = 100\n\n\n_base_"
  },
  {
    "path": "configs/adamixer/adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py",
    "chars": 2064,
    "preview": "_base_ = './adamixer_r50_mstrain_480-800_3x_coco.py'\nnum_query = 300\nmodel = dict(\n    rpn_head=dict(num_query=num_query"
  },
  {
    "path": "configs/adamixer/adamixer_r50_mstrain_480-800_3x_coco.py",
    "chars": 846,
    "preview": "_base_ = './adamixer_r50_1x_coco.py'\n\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.37"
  },
  {
    "path": "configs/adamixer/adamixer_swin_s_300_query_crop_mstrain_480-800_3x_coco.py",
    "chars": 1110,
    "preview": "_base_ = './adamixer_r50_300_query_crop_mstrain_480-800_3x_coco.py'\npretrained = './swin_small_patch4_window7_224.pth'\nm"
  },
  {
    "path": "configs/albu_example/README.md",
    "chars": 1044,
    "preview": "# Albu Example\n\n<!-- [OTHERS] -->\n\n```\n@article{2018arXiv180906839B,\n  author = {A. Buslaev, A. Parinov, E. Khvedchenya,"
  },
  {
    "path": "configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py",
    "chars": 2276,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nimg_norm_cfg = dict(\n    mean=[123.675, 116.28, 103.53], std=[58.39"
  },
  {
    "path": "configs/atss/README.md",
    "chars": 1541,
    "preview": "# Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection\n\n## Introductio"
  },
  {
    "path": "configs/atss/atss_r101_fpn_1x_coco.py",
    "chars": 125,
    "preview": "_base_ = './atss_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='torchvision://resnet101',\n    backbone=dict(depth=101"
  },
  {
    "path": "configs/atss/atss_r50_fpn_1x_coco.py",
    "chars": 1887,
    "preview": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
  },
  {
    "path": "configs/autoassign/README.md",
    "chars": 1434,
    "preview": "# AutoAssign: Differentiable Label Assignment for Dense Object Detection\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```\n@ar"
  },
  {
    "path": "configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py",
    "chars": 2645,
    "preview": "# We follow the original implementation which\n# adopts the Caffe pre-trained backbone.\n_base_ = [\n    '../_base_/dataset"
  },
  {
    "path": "configs/carafe/README.md",
    "chars": 2382,
    "preview": "# CARAFE: Content-Aware ReAssembly of FEatures\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\nWe provide config files to reprod"
  },
  {
    "path": "configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py",
    "chars": 1640,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    neck=dict(\n        type='FPN_CARAFE',\n       "
  },
  {
    "path": "configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py",
    "chars": 1971,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    neck=dict(\n        type='FPN_CARAFE',\n        in_"
  },
  {
    "path": "configs/cascade_rcnn/README.md",
    "chars": 12354,
    "preview": "# Cascade R-CNN: High Quality Object Detection and Instance Segmentation\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```late"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py",
    "chars": 158,
    "preview": "_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet101_"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py",
    "chars": 127,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py",
    "chars": 128,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(dept"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 1348,
    "preview": "_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']\n\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caff"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py",
    "chars": 182,
    "preview": "_base_ = [\n    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_instance.py',\n    '../_base"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py",
    "chars": 183,
    "preview": "_base_ = [\n    '../_base_/models/cascade_mask_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_instance.py',\n    '../_base"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py",
    "chars": 376,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    back"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py",
    "chars": 377,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    bac"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py",
    "chars": 376,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    back"
  },
  {
    "path": "configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py",
    "chars": 377,
    "preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    bac"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py",
    "chars": 153,
    "preview": "_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet101_caffe"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py",
    "chars": 122,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101)"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py",
    "chars": 123,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 1309,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\n\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',\n   "
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py",
    "chars": 178,
    "preview": "_base_ = [\n    '../_base_/models/cascade_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sc"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py",
    "chars": 149,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\n# learning policy\nlr_config = dict(step=[16, 19])\nrunner = dict(type='Epoch"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py",
    "chars": 371,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    backbone="
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py",
    "chars": 372,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    backbone"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py",
    "chars": 395,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    type='CascadeRCNN',\n    pretrained='open-mmlab://resnext1"
  },
  {
    "path": "configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py",
    "chars": 396,
    "preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n    type='CascadeRCNN',\n    pretrained='open-mmlab://resnext"
  },
  {
    "path": "configs/cascade_rpn/README.md",
    "chars": 1747,
    "preview": "# Cascade RPN\n\n<!-- [ALGORITHM] -->\n\nWe provide the code for reproducing experiment results of [Cascade RPN](https://arx"
  },
  {
    "path": "configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 2770,
    "preview": "_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caff"
  },
  {
    "path": "configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 3490,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'\nrpn_weight = 0.7\nmodel = dict(\n    rpn_head=dict(\n       "
  },
  {
    "path": "configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py",
    "chars": 2750,
    "preview": "_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n    rpn_head=dict(\n        _delete_=True,\n        type='Cas"
  },
  {
    "path": "configs/centripetalnet/README.md",
    "chars": 1510,
    "preview": "# CentripetalNet\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@InProceedings{Dong_2020_CVPR,\nauthor = {Dong, Zhiwei "
  },
  {
    "path": "configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py",
    "chars": 3465,
    "preview": "_base_ = [\n    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
  },
  {
    "path": "configs/cityscapes/README.md",
    "chars": 2913,
    "preview": "# Cityscapes Dataset\n\n<!-- [DATASET] -->\n\n```\n@inproceedings{Cordts2016Cityscapes,\n   title={The Cityscapes Dataset for "
  },
  {
    "path": "configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py",
    "chars": 1449,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_fpn.py',\n    '../_base_/datasets/cityscapes_detection.py',\n    '../_bas"
  },
  {
    "path": "configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py",
    "chars": 1711,
    "preview": "_base_ = [\n    '../_base_/models/mask_rcnn_r50_fpn.py',\n    '../_base_/datasets/cityscapes_instance.py', '../_base_/defa"
  },
  {
    "path": "configs/cornernet/README.md",
    "chars": 2993,
    "preview": "# CornerNet\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@inproceedings{law2018cornernet,\n  title={Cornernet: Detect"
  },
  {
    "path": "configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py",
    "chars": 3404,
    "preview": "_base_ = [\n    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
  },
  {
    "path": "configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py",
    "chars": 3404,
    "preview": "_base_ = [\n    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
  },
  {
    "path": "configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py",
    "chars": 3404,
    "preview": "_base_ = [\n    '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
  },
  {
    "path": "configs/dcn/README.md",
    "chars": 10083,
    "preview": "# Deformable Convolutional Networks\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```none\n@inproceedings{dai2017deformable,\n  "
  },
  {
    "path": "configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 222,
    "preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type="
  },
  {
    "path": "configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 221,
    "preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='"
  },
  {
    "path": "configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 228,
    "preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict"
  },
  {
    "path": "configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 217,
    "preview": "_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN'"
  },
  {
    "path": "configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 216,
    "preview": "_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN',"
  },
  {
    "path": "configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 215,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN', "
  },
  {
    "path": "configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 214,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN', d"
  },
  {
    "path": "configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py",
    "chars": 408,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        bbox_roi_extractor=dic"
  },
  {
    "path": "configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py",
    "chars": 216,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCNv2',"
  },
  {
    "path": "configs/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py",
    "chars": 216,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCNv2',"
  },
  {
    "path": "configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py",
    "chars": 417,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        bbox_roi_extractor=dic"
  },
  {
    "path": "configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 506,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n "
  },
  {
    "path": "configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 211,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN', defo"
  },
  {
    "path": "configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
    "chars": 210,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCN', defor"
  },
  {
    "path": "configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py",
    "chars": 212,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        dcn=dict(type='DCNv2', def"
  },
  {
    "path": "configs/deepfashion/README.md",
    "chars": 2402,
    "preview": "# DeepFashion\n\n<!-- [DATASET] -->\n\n[MMFashion](https://github.com/open-mmlab/mmfashion) develops \"fashion parsing and se"
  },
  {
    "path": "configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py",
    "chars": 351,
    "preview": "_base_ = [\n    '../_base_/models/mask_rcnn_r50_fpn.py',\n    '../_base_/datasets/deepfashion.py', '../_base_/schedules/sc"
  },
  {
    "path": "configs/deformable_detr/README.md",
    "chars": 2658,
    "preview": "# Deformable DETR\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\nWe provide the config files for Deformable DETR: [Deformable D"
  },
  {
    "path": "configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py",
    "chars": 6440,
    "preview": "_base_ = [\n    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'\n]\nmodel = dict(\n    type='Deformab"
  },
  {
    "path": "configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py",
    "chars": 99,
    "preview": "_base_ = 'deformable_detr_r50_16x2_50e_coco.py'\nmodel = dict(bbox_head=dict(with_box_refine=True))\n"
  },
  {
    "path": "configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py",
    "chars": 103,
    "preview": "_base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py'\nmodel = dict(bbox_head=dict(as_two_stage=True))\n"
  },
  {
    "path": "configs/detectors/README.md",
    "chars": 4273,
    "preview": "# DetectoRS\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\nWe provide the config files for [DetectoRS: Detecting Objects with R"
  },
  {
    "path": "configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py",
    "chars": 851,
    "preview": "_base_ = [\n    '../_base_/models/cascade_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sc"
  },
  {
    "path": "configs/detectors/cascade_rcnn_r50_sac_1x_coco.py",
    "chars": 382,
    "preview": "_base_ = [\n    '../_base_/models/cascade_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sc"
  },
  {
    "path": "configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py",
    "chars": 1053,
    "preview": "_base_ = [\n    '../_base_/models/cascade_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sc"
  },
  {
    "path": "configs/detectors/detectors_htc_r50_1x_coco.py",
    "chars": 916,
    "preview": "_base_ = '../htc/htc_r50_fpn_1x_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        type='DetectoRS_ResNet',\n        conv"
  },
  {
    "path": "configs/detectors/htc_r50_rfp_1x_coco.py",
    "chars": 714,
    "preview": "_base_ = '../htc/htc_r50_fpn_1x_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        type='DetectoRS_ResNet',\n        conv"
  },
  {
    "path": "configs/detectors/htc_r50_sac_1x_coco.py",
    "chars": 245,
    "preview": "_base_ = '../htc/htc_r50_fpn_1x_coco.py'\n\nmodel = dict(\n    backbone=dict(\n        type='DetectoRS_ResNet',\n        conv"
  },
  {
    "path": "configs/detr/README.md",
    "chars": 1142,
    "preview": "# DETR\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\nWe provide the config files for DETR: [End-to-End Object Detection with T"
  },
  {
    "path": "configs/detr/detr_r50_8x2_150e_coco.py",
    "chars": 5820,
    "preview": "_base_ = [\n    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'\n]\nmodel = dict(\n    type='DETR',\n "
  },
  {
    "path": "configs/double_heads/README.md",
    "chars": 1163,
    "preview": "# Rethinking Classification and Localization for Object Detection\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@arti"
  },
  {
    "path": "configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py",
    "chars": 845,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        type='DoubleHeadRoIHea"
  },
  {
    "path": "configs/dynamic_rcnn/README.md",
    "chars": 1073,
    "preview": "# Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```\n"
  },
  {
    "path": "configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py",
    "chars": 1051,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        type='DynamicRoIHead',"
  },
  {
    "path": "configs/empirical_attention/README.md",
    "chars": 3099,
    "preview": "# An Empirical Study of Spatial Attention Mechanisms in Deep Networks\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@"
  },
  {
    "path": "configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py",
    "chars": 403,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(plugins=[\n        dict(\n       "
  },
  {
    "path": "configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py",
    "chars": 575,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        plugins=[\n            "
  },
  {
    "path": "configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py",
    "chars": 403,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(plugins=[\n        dict(\n       "
  },
  {
    "path": "configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py",
    "chars": 575,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        plugins=[\n            "
  },
  {
    "path": "configs/fast_rcnn/README.md",
    "chars": 270,
    "preview": "# Fast R-CNN\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@inproceedings{girshick2015fast,\n  title={Fast r-cnn},\n  a"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py",
    "chars": 150,
    "preview": "_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet101_caffe',\n"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py",
    "chars": 119,
    "preview": "_base_ = './fast_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))\n"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py",
    "chars": 119,
    "preview": "_base_ = './fast_rcnn_r50_fpn_2x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))\n"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 1639,
    "preview": "_base_ = './fast_rcnn_r50_fpn_1x_coco.py'\n\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',\n    ba"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py",
    "chars": 1944,
    "preview": "_base_ = [\n    '../_base_/models/fast_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sched"
  },
  {
    "path": "configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py",
    "chars": 147,
    "preview": "_base_ = './fast_rcnn_r50_fpn_1x_coco.py'\n\n# learning policy\nlr_config = dict(step=[16, 22])\nrunner = dict(type='EpochBa"
  },
  {
    "path": "configs/faster_rcnn/README.md",
    "chars": 13728,
    "preview": "# Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py",
    "chars": 152,
    "preview": "_base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet101_caffe'"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py",
    "chars": 121,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py",
    "chars": 121,
    "preview": "_base_ = './faster_rcnn_r50_fpn_2x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py",
    "chars": 1388,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_caffe_c4.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py",
    "chars": 1304,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_caffe_dc5.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_bas"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py",
    "chars": 1448,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_caffe_dc5.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_bas"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py",
    "chars": 162,
    "preview": "_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py'\n# learning policy\nlr_config = dict(step=[28, 34])\nrunner = dic"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py",
    "chars": 1331,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',\n    b"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py",
    "chars": 475,
    "preview": "_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'\nmodel = dict(roi_head=dict(bbox_head=dict(num_classes=3)))\ncla"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py",
    "chars": 459,
    "preview": "_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'\nmodel = dict(roi_head=dict(bbox_head=dict(num_classes=1)))\ncla"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py",
    "chars": 1475,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',\n    b"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py",
    "chars": 162,
    "preview": "_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'\n# learning policy\nlr_config = dict(step=[16, 23])\nrunner = dic"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py",
    "chars": 162,
    "preview": "_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'\n# learning policy\nlr_config = dict(step=[28, 34])\nrunner = dic"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py",
    "chars": 380,
    "preview": "_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'\n\n# learning policy\nlr_config = dict(\n    policy='step',\n    warm"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py",
    "chars": 177,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sch"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py",
    "chars": 177,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sch"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py",
    "chars": 207,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        bbox_head=dict(\n            reg_dec"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py",
    "chars": 201,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        bbox_head=dict(\n            reg_dec"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py",
    "chars": 200,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    roi_head=dict(\n        bbox_head=dict(\n            reg_dec"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py",
    "chars": 118,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler'))))\n"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py",
    "chars": 347,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sch"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py",
    "chars": 370,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    backbone=d"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py",
    "chars": 370,
    "preview": "_base_ = './faster_rcnn_r50_fpn_2x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    backbone=d"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py",
    "chars": 370,
    "preview": "_base_ = './faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    backbone=d"
  },
  {
    "path": "configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py",
    "chars": 370,
    "preview": "_base_ = './faster_rcnn_r50_fpn_2x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    backbone=d"
  },
  {
    "path": "configs/fcos/README.md",
    "chars": 5195,
    "preview": "# FCOS: Fully Convolutional One-Stage Object Detection\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@article{tian201"
  },
  {
    "path": "configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py",
    "chars": 1697,
    "preview": "_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'\n\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',"
  },
  {
    "path": "configs/fcos/fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py",
    "chars": 1841,
    "preview": "_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'\n\nmodel = dict(\n    pretrained='open-mmlab://detectron2/resnet50_caffe',"
  },
  {
    "path": "configs/fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py",
    "chars": 128,
    "preview": "_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'\nmodel = dict(bbox_head=dict(center_sampling=True, center_sample_radiu"
  },
  {
    "path": "configs/fcos/fcos_r101_caffe_fpn_gn-head_1x_coco.py",
    "chars": 152,
    "preview": "_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron/resnet101_caffe'"
  },
  {
    "path": "configs/fcos/fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py",
    "chars": 1478,
    "preview": "_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://detectron/resnet101_caffe'"
  },
  {
    "path": "configs/fcos/fcos_r50_caffe_fpn_gn-head_1x_coco.py",
    "chars": 3248,
    "preview": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
  },
  {
    "path": "configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py",
    "chars": 166,
    "preview": "# TODO: Remove this config after benchmarking all related configs\n_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'\n\ndata"
  },
  {
    "path": "configs/fcos/fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py",
    "chars": 1331,
    "preview": "_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'\nimg_norm_cfg = dict(\n    mean=[102.9801, 115.9465, 122.7717], std=[1."
  },
  {
    "path": "configs/fcos/fcos_r50_torch_fpn_gn-head_4x4_1x_coco.py",
    "chars": 469,
    "preview": "# TODO: Remove this config after benchmarking all related configs\n_base_ = 'fcos_r50_caffe_fpn_gn-head_1x_coco.py'\n\ndata"
  },
  {
    "path": "configs/fcos/fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py",
    "chars": 1915,
    "preview": "_base_ = './fcos_r50_caffe_fpn_gn-head_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    bac"
  },
  {
    "path": "configs/foveabox/README.md",
    "chars": 6029,
    "preview": "# FoveaBox: Beyond Anchor-based Object Detector\n\n<!-- [ALGORITHM] -->\n\nFoveaBox is an accurate, flexible and completely "
  },
  {
    "path": "configs/foveabox/fovea_align_r101_fpn_gn-head_4x4_2x_coco.py",
    "chars": 348,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\nmodel = dict(\n    pretrained='torchvision://resnet101',\n    backbone=dict(dept"
  },
  {
    "path": "configs/foveabox/fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py",
    "chars": 973,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\nmodel = dict(\n    pretrained='torchvision://resnet101',\n    backbone=dict(dept"
  },
  {
    "path": "configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py",
    "chars": 362,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\nmodel = dict(\n    bbox_head=dict(\n        with_deform=True,\n        norm_cfg=d"
  },
  {
    "path": "configs/foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py",
    "chars": 901,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\nmodel = dict(\n    bbox_head=dict(\n        with_deform=True,\n        norm_cfg=d"
  },
  {
    "path": "configs/foveabox/fovea_r101_fpn_4x4_1x_coco.py",
    "chars": 119,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))\n"
  },
  {
    "path": "configs/foveabox/fovea_r101_fpn_4x4_2x_coco.py",
    "chars": 119,
    "preview": "_base_ = './fovea_r50_fpn_4x4_2x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))\n"
  },
  {
    "path": "configs/foveabox/fovea_r50_fpn_4x4_1x_coco.py",
    "chars": 1574,
    "preview": "_base_ = [\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
  },
  {
    "path": "configs/foveabox/fovea_r50_fpn_4x4_2x_coco.py",
    "chars": 146,
    "preview": "_base_ = './fovea_r50_fpn_4x4_1x_coco.py'\n# learning policy\nlr_config = dict(step=[16, 22])\nrunner = dict(type='EpochBas"
  },
  {
    "path": "configs/fp16/README.md",
    "chars": 2212,
    "preview": "# Mixed Precision Training\n\n## Introduction\n\n<!-- [OTHERS] -->\n\n```latex\n@article{micikevicius2017mixed,\n  title={Mixed "
  },
  {
    "path": "configs/fp16/faster_rcnn_r50_fpn_fp16_1x_coco.py",
    "chars": 102,
    "preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\n# fp16 settings\nfp16 = dict(loss_scale=512.)\n"
  },
  {
    "path": "configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py",
    "chars": 98,
    "preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\n# fp16 settings\nfp16 = dict(loss_scale=512.)\n"
  },
  {
    "path": "configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py",
    "chars": 98,
    "preview": "_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'\n# fp16 settings\nfp16 = dict(loss_scale=512.)\n"
  },
  {
    "path": "configs/fpg/README.md",
    "chars": 3825,
    "preview": "# Feature Pyramid Grids\n\n## Introduction\n\n```latex\n@article{chen2020feature,\n  title={Feature pyramid grids},\n  author={"
  },
  {
    "path": "configs/fpg/faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py",
    "chars": 314,
    "preview": "_base_ = 'faster_rcnn_r50_fpg_crop640_50e_coco.py'\n\nnorm_cfg = dict(type='BN', requires_grad=True)\nmodel = dict(\n    nec"
  },
  {
    "path": "configs/fpg/faster_rcnn_r50_fpg_crop640_50e_coco.py",
    "chars": 1452,
    "preview": "_base_ = 'faster_rcnn_r50_fpn_crop640_50e_coco.py'\n\nnorm_cfg = dict(type='BN', requires_grad=True)\nmodel = dict(\n    nec"
  },
  {
    "path": "configs/fpg/faster_rcnn_r50_fpn_crop640_50e_coco.py",
    "chars": 2140,
    "preview": "_base_ = [\n    '../_base_/models/faster_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_detection.py',\n    '../_base_/sch"
  },
  {
    "path": "configs/fpg/mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py",
    "chars": 357,
    "preview": "_base_ = 'mask_rcnn_r50_fpg_crop640_50e_coco.py'\n\nmodel = dict(\n    neck=dict(out_channels=128, inter_channels=128),\n   "
  },
  {
    "path": "configs/fpg/mask_rcnn_r50_fpg_crop640_50e_coco.py",
    "chars": 1450,
    "preview": "_base_ = 'mask_rcnn_r50_fpn_crop640_50e_coco.py'\n\nnorm_cfg = dict(type='BN', requires_grad=True)\nmodel = dict(\n    neck="
  },
  {
    "path": "configs/fpg/mask_rcnn_r50_fpn_crop640_50e_coco.py",
    "chars": 2312,
    "preview": "_base_ = [\n    '../_base_/models/mask_rcnn_r50_fpn.py',\n    '../_base_/datasets/coco_instance.py',\n    '../_base_/schedu"
  },
  {
    "path": "configs/fpg/retinanet_r50_fpg-chn128_crop640_50e_coco.py",
    "chars": 154,
    "preview": "_base_ = 'retinanet_r50_fpg_crop640_50e_coco.py'\n\nmodel = dict(\n    neck=dict(out_channels=128, inter_channels=128),\n   "
  },
  {
    "path": "configs/fpg/retinanet_r50_fpg_crop640_50e_coco.py",
    "chars": 1571,
    "preview": "_base_ = '../nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py'\n\nnorm_cfg = dict(type='BN', requires_grad=True)\nmodel = d"
  },
  {
    "path": "configs/free_anchor/README.md",
    "chars": 2402,
    "preview": "# FreeAnchor: Learning to Match Anchors for Visual Object Detection\n\n## Introduction\n\n<!-- [ALGORITHM] -->\n\n```latex\n@in"
  },
  {
    "path": "configs/free_anchor/retinanet_free_anchor_r101_fpn_1x_coco.py",
    "chars": 131,
    "preview": "_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(d"
  },
  {
    "path": "configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py",
    "chars": 775,
    "preview": "_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'\nmodel = dict(\n    bbox_head=dict(\n        _delete_=True,\n        ty"
  },
  {
    "path": "configs/free_anchor/retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py",
    "chars": 326,
    "preview": "_base_ = './retinanet_free_anchor_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_32x4d',\n    "
  },
  {
    "path": "configs/fsaf/README.md",
    "chars": 3723,
    "preview": "# Feature Selective Anchor-Free Module for Single-Shot Object Detection\n\n<!-- [ALGORITHM] -->\n\nFSAF is an anchor-free me"
  },
  {
    "path": "configs/fsaf/fsaf_r101_fpn_1x_coco.py",
    "chars": 114,
    "preview": "_base_ = './fsaf_r50_fpn_1x_coco.py'\nmodel = dict(pretrained='torchvision://resnet101', backbone=dict(depth=101))\n"
  },
  {
    "path": "configs/fsaf/fsaf_r50_fpn_1x_coco.py",
    "chars": 1554,
    "preview": "_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'\n# model settings\nmodel = dict(\n    type='FSAF',\n    bbox_head=dict("
  },
  {
    "path": "configs/fsaf/fsaf_x101_64x4d_fpn_1x_coco.py",
    "chars": 363,
    "preview": "_base_ = './fsaf_r50_fpn_1x_coco.py'\nmodel = dict(\n    pretrained='open-mmlab://resnext101_64x4d',\n    backbone=dict(\n  "
  },
  {
    "path": "configs/gcnet/README.md",
    "chars": 14453,
    "preview": "# GCNet for Object Detection\n\nBy [Yue Cao](http://yue-cao.me), [Jiarui Xu](http://jerryxu.net), [Stephen Lin](https://sc"
  },
  {
    "path": "configs/gcnet/cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py",
    "chars": 180,
    "preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'\nmodel = dict(\n    backbone=dict(\n        norm_cfg"
  }
]

// ... and 767 more files (download for full content)

About this extraction

This page contains the full source code of the MCG-NJU/AdaMixer GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 967 files (12.7 MB), approximately 3.4M tokens, and a symbol index with 2194 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repository-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!