master 5d1704db7489 cached
483 files
3.8 MB
1.0M tokens
3240 symbols
1 requests
Download .txt
Showing preview only (4,111K chars total). Download the full file or copy to clipboard to get everything.
Repository: RockeyCoss/Prompt-Segment-Anything
Branch: master
Commit: 5d1704db7489
Files: 483
Total size: 3.8 MB

Directory structure:
gitextract_fypon3it/

├── .gitignore
├── LICENSE
├── README.md
├── app.py
├── mmdet/
│   ├── __init__.py
│   ├── apis/
│   │   ├── __init__.py
│   │   ├── inference.py
│   │   ├── test.py
│   │   └── train.py
│   ├── core/
│   │   ├── __init__.py
│   │   ├── anchor/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_generator.py
│   │   │   ├── builder.py
│   │   │   ├── point_generator.py
│   │   │   └── utils.py
│   │   ├── bbox/
│   │   │   ├── __init__.py
│   │   │   ├── assigners/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── approx_max_iou_assigner.py
│   │   │   │   ├── ascend_assign_result.py
│   │   │   │   ├── ascend_max_iou_assigner.py
│   │   │   │   ├── assign_result.py
│   │   │   │   ├── atss_assigner.py
│   │   │   │   ├── base_assigner.py
│   │   │   │   ├── center_region_assigner.py
│   │   │   │   ├── grid_assigner.py
│   │   │   │   ├── hungarian_assigner.py
│   │   │   │   ├── mask_hungarian_assigner.py
│   │   │   │   ├── max_iou_assigner.py
│   │   │   │   ├── point_assigner.py
│   │   │   │   ├── region_assigner.py
│   │   │   │   ├── sim_ota_assigner.py
│   │   │   │   ├── task_aligned_assigner.py
│   │   │   │   └── uniform_assigner.py
│   │   │   ├── builder.py
│   │   │   ├── coder/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_bbox_coder.py
│   │   │   │   ├── bucketing_bbox_coder.py
│   │   │   │   ├── delta_xywh_bbox_coder.py
│   │   │   │   ├── distance_point_bbox_coder.py
│   │   │   │   ├── legacy_delta_xywh_bbox_coder.py
│   │   │   │   ├── pseudo_bbox_coder.py
│   │   │   │   ├── tblr_bbox_coder.py
│   │   │   │   └── yolo_bbox_coder.py
│   │   │   ├── demodata.py
│   │   │   ├── iou_calculators/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── iou2d_calculator.py
│   │   │   ├── match_costs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── match_cost.py
│   │   │   ├── samplers/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_sampler.py
│   │   │   │   ├── combined_sampler.py
│   │   │   │   ├── instance_balanced_pos_sampler.py
│   │   │   │   ├── iou_balanced_neg_sampler.py
│   │   │   │   ├── mask_pseudo_sampler.py
│   │   │   │   ├── mask_sampling_result.py
│   │   │   │   ├── ohem_sampler.py
│   │   │   │   ├── pseudo_sampler.py
│   │   │   │   ├── random_sampler.py
│   │   │   │   ├── sampling_result.py
│   │   │   │   └── score_hlr_sampler.py
│   │   │   └── transforms.py
│   │   ├── data_structures/
│   │   │   ├── __init__.py
│   │   │   ├── general_data.py
│   │   │   └── instance_data.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_overlaps.py
│   │   │   ├── class_names.py
│   │   │   ├── eval_hooks.py
│   │   │   ├── mean_ap.py
│   │   │   ├── panoptic_utils.py
│   │   │   └── recall.py
│   │   ├── export/
│   │   │   ├── __init__.py
│   │   │   ├── model_wrappers.py
│   │   │   ├── onnx_helper.py
│   │   │   └── pytorch2onnx.py
│   │   ├── hook/
│   │   │   ├── __init__.py
│   │   │   ├── checkloss_hook.py
│   │   │   ├── ema.py
│   │   │   ├── memory_profiler_hook.py
│   │   │   ├── set_epoch_info_hook.py
│   │   │   ├── sync_norm_hook.py
│   │   │   ├── sync_random_size_hook.py
│   │   │   ├── wandblogger_hook.py
│   │   │   ├── yolox_lrupdater_hook.py
│   │   │   └── yolox_mode_switch_hook.py
│   │   ├── mask/
│   │   │   ├── __init__.py
│   │   │   ├── mask_target.py
│   │   │   ├── structures.py
│   │   │   └── utils.py
│   │   ├── optimizers/
│   │   │   ├── __init__.py
│   │   │   ├── builder.py
│   │   │   └── layer_decay_optimizer_constructor.py
│   │   ├── post_processing/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_nms.py
│   │   │   ├── matrix_nms.py
│   │   │   └── merge_augs.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── dist_utils.py
│   │   │   └── misc.py
│   │   └── visualization/
│   │       ├── __init__.py
│   │       ├── image.py
│   │       └── palette.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── api_wrappers/
│   │   │   ├── __init__.py
│   │   │   ├── coco_api.py
│   │   │   └── panoptic_evaluation.py
│   │   ├── builder.py
│   │   ├── cityscapes.py
│   │   ├── coco.py
│   │   ├── coco_occluded.py
│   │   ├── coco_panoptic.py
│   │   ├── custom.py
│   │   ├── dataset_wrappers.py
│   │   ├── deepfashion.py
│   │   ├── lvis.py
│   │   ├── objects365.py
│   │   ├── openimages.py
│   │   ├── pipelines/
│   │   │   ├── __init__.py
│   │   │   ├── auto_augment.py
│   │   │   ├── compose.py
│   │   │   ├── formating.py
│   │   │   ├── formatting.py
│   │   │   ├── instaboost.py
│   │   │   ├── loading.py
│   │   │   ├── test_time_aug.py
│   │   │   └── transforms.py
│   │   ├── samplers/
│   │   │   ├── __init__.py
│   │   │   ├── class_aware_sampler.py
│   │   │   ├── distributed_sampler.py
│   │   │   ├── group_sampler.py
│   │   │   └── infinite_sampler.py
│   │   ├── utils.py
│   │   ├── voc.py
│   │   ├── wider_face.py
│   │   └── xml_style.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── backbones/
│   │   │   ├── __init__.py
│   │   │   ├── csp_darknet.py
│   │   │   ├── darknet.py
│   │   │   ├── detectors_resnet.py
│   │   │   ├── detectors_resnext.py
│   │   │   ├── efficientnet.py
│   │   │   ├── hourglass.py
│   │   │   ├── hrnet.py
│   │   │   ├── mobilenet_v2.py
│   │   │   ├── pvt.py
│   │   │   ├── regnet.py
│   │   │   ├── res2net.py
│   │   │   ├── resnest.py
│   │   │   ├── resnet.py
│   │   │   ├── resnext.py
│   │   │   ├── ssd_vgg.py
│   │   │   ├── swin.py
│   │   │   └── trident_resnet.py
│   │   ├── builder.py
│   │   ├── dense_heads/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_free_head.py
│   │   │   ├── anchor_head.py
│   │   │   ├── ascend_anchor_head.py
│   │   │   ├── ascend_retina_head.py
│   │   │   ├── ascend_ssd_head.py
│   │   │   ├── atss_head.py
│   │   │   ├── autoassign_head.py
│   │   │   ├── base_dense_head.py
│   │   │   ├── base_mask_head.py
│   │   │   ├── cascade_rpn_head.py
│   │   │   ├── centernet_head.py
│   │   │   ├── centripetal_head.py
│   │   │   ├── corner_head.py
│   │   │   ├── ddod_head.py
│   │   │   ├── deformable_detr_head.py
│   │   │   ├── dense_test_mixins.py
│   │   │   ├── detr_head.py
│   │   │   ├── embedding_rpn_head.py
│   │   │   ├── fcos_head.py
│   │   │   ├── fovea_head.py
│   │   │   ├── free_anchor_retina_head.py
│   │   │   ├── fsaf_head.py
│   │   │   ├── ga_retina_head.py
│   │   │   ├── ga_rpn_head.py
│   │   │   ├── gfl_head.py
│   │   │   ├── guided_anchor_head.py
│   │   │   ├── lad_head.py
│   │   │   ├── ld_head.py
│   │   │   ├── mask2former_head.py
│   │   │   ├── maskformer_head.py
│   │   │   ├── nasfcos_head.py
│   │   │   ├── paa_head.py
│   │   │   ├── pisa_retinanet_head.py
│   │   │   ├── pisa_ssd_head.py
│   │   │   ├── reppoints_head.py
│   │   │   ├── retina_head.py
│   │   │   ├── retina_sepbn_head.py
│   │   │   ├── rpn_head.py
│   │   │   ├── sabl_retina_head.py
│   │   │   ├── solo_head.py
│   │   │   ├── solov2_head.py
│   │   │   ├── ssd_head.py
│   │   │   ├── tood_head.py
│   │   │   ├── vfnet_head.py
│   │   │   ├── yolact_head.py
│   │   │   ├── yolo_head.py
│   │   │   ├── yolof_head.py
│   │   │   └── yolox_head.py
│   │   ├── detectors/
│   │   │   ├── __init__.py
│   │   │   ├── atss.py
│   │   │   ├── autoassign.py
│   │   │   ├── base.py
│   │   │   ├── cascade_rcnn.py
│   │   │   ├── centernet.py
│   │   │   ├── cornernet.py
│   │   │   ├── ddod.py
│   │   │   ├── deformable_detr.py
│   │   │   ├── detr.py
│   │   │   ├── fast_rcnn.py
│   │   │   ├── faster_rcnn.py
│   │   │   ├── fcos.py
│   │   │   ├── fovea.py
│   │   │   ├── fsaf.py
│   │   │   ├── gfl.py
│   │   │   ├── grid_rcnn.py
│   │   │   ├── htc.py
│   │   │   ├── kd_one_stage.py
│   │   │   ├── lad.py
│   │   │   ├── mask2former.py
│   │   │   ├── mask_rcnn.py
│   │   │   ├── mask_scoring_rcnn.py
│   │   │   ├── maskformer.py
│   │   │   ├── nasfcos.py
│   │   │   ├── paa.py
│   │   │   ├── panoptic_fpn.py
│   │   │   ├── panoptic_two_stage_segmentor.py
│   │   │   ├── point_rend.py
│   │   │   ├── queryinst.py
│   │   │   ├── reppoints_detector.py
│   │   │   ├── retinanet.py
│   │   │   ├── rpn.py
│   │   │   ├── scnet.py
│   │   │   ├── single_stage.py
│   │   │   ├── single_stage_instance_seg.py
│   │   │   ├── solo.py
│   │   │   ├── solov2.py
│   │   │   ├── sparse_rcnn.py
│   │   │   ├── tood.py
│   │   │   ├── trident_faster_rcnn.py
│   │   │   ├── two_stage.py
│   │   │   ├── vfnet.py
│   │   │   ├── yolact.py
│   │   │   ├── yolo.py
│   │   │   ├── yolof.py
│   │   │   └── yolox.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy.py
│   │   │   ├── ae_loss.py
│   │   │   ├── balanced_l1_loss.py
│   │   │   ├── cross_entropy_loss.py
│   │   │   ├── dice_loss.py
│   │   │   ├── focal_loss.py
│   │   │   ├── gaussian_focal_loss.py
│   │   │   ├── gfocal_loss.py
│   │   │   ├── ghm_loss.py
│   │   │   ├── iou_loss.py
│   │   │   ├── kd_loss.py
│   │   │   ├── mse_loss.py
│   │   │   ├── pisa_loss.py
│   │   │   ├── seesaw_loss.py
│   │   │   ├── smooth_l1_loss.py
│   │   │   ├── utils.py
│   │   │   └── varifocal_loss.py
│   │   ├── necks/
│   │   │   ├── __init__.py
│   │   │   ├── bfp.py
│   │   │   ├── channel_mapper.py
│   │   │   ├── ct_resnet_neck.py
│   │   │   ├── dilated_encoder.py
│   │   │   ├── dyhead.py
│   │   │   ├── fpg.py
│   │   │   ├── fpn.py
│   │   │   ├── fpn_carafe.py
│   │   │   ├── hrfpn.py
│   │   │   ├── nas_fpn.py
│   │   │   ├── nasfcos_fpn.py
│   │   │   ├── pafpn.py
│   │   │   ├── rfp.py
│   │   │   ├── ssd_neck.py
│   │   │   ├── yolo_neck.py
│   │   │   └── yolox_pafpn.py
│   │   ├── plugins/
│   │   │   ├── __init__.py
│   │   │   ├── dropblock.py
│   │   │   ├── msdeformattn_pixel_decoder.py
│   │   │   └── pixel_decoder.py
│   │   ├── roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── base_roi_head.py
│   │   │   ├── bbox_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── bbox_head.py
│   │   │   │   ├── convfc_bbox_head.py
│   │   │   │   ├── dii_head.py
│   │   │   │   ├── double_bbox_head.py
│   │   │   │   ├── sabl_head.py
│   │   │   │   └── scnet_bbox_head.py
│   │   │   ├── cascade_roi_head.py
│   │   │   ├── double_roi_head.py
│   │   │   ├── dynamic_roi_head.py
│   │   │   ├── grid_roi_head.py
│   │   │   ├── htc_roi_head.py
│   │   │   ├── mask_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── coarse_mask_head.py
│   │   │   │   ├── dynamic_mask_head.py
│   │   │   │   ├── fcn_mask_head.py
│   │   │   │   ├── feature_relay_head.py
│   │   │   │   ├── fused_semantic_head.py
│   │   │   │   ├── global_context_head.py
│   │   │   │   ├── grid_head.py
│   │   │   │   ├── htc_mask_head.py
│   │   │   │   ├── mask_point_head.py
│   │   │   │   ├── maskiou_head.py
│   │   │   │   ├── scnet_mask_head.py
│   │   │   │   └── scnet_semantic_head.py
│   │   │   ├── mask_scoring_roi_head.py
│   │   │   ├── pisa_roi_head.py
│   │   │   ├── point_rend_roi_head.py
│   │   │   ├── roi_extractors/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_roi_extractor.py
│   │   │   │   ├── generic_roi_extractor.py
│   │   │   │   └── single_level_roi_extractor.py
│   │   │   ├── scnet_roi_head.py
│   │   │   ├── shared_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   └── res_layer.py
│   │   │   ├── sparse_roi_head.py
│   │   │   ├── standard_roi_head.py
│   │   │   ├── test_mixins.py
│   │   │   └── trident_roi_head.py
│   │   ├── seg_heads/
│   │   │   ├── __init__.py
│   │   │   ├── base_semantic_head.py
│   │   │   ├── panoptic_fpn_head.py
│   │   │   └── panoptic_fusion_heads/
│   │   │       ├── __init__.py
│   │   │       ├── base_panoptic_fusion_head.py
│   │   │       ├── heuristic_fusion_head.py
│   │   │       └── maskformer_fusion_head.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── brick_wrappers.py
│   │       ├── builder.py
│   │       ├── ckpt_convert.py
│   │       ├── conv_upsample.py
│   │       ├── csp_layer.py
│   │       ├── gaussian_target.py
│   │       ├── inverted_residual.py
│   │       ├── make_divisible.py
│   │       ├── misc.py
│   │       ├── normed_predictor.py
│   │       ├── panoptic_gt_processing.py
│   │       ├── point_sample.py
│   │       ├── positional_encoding.py
│   │       ├── res_layer.py
│   │       ├── se_layer.py
│   │       └── transformer.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── ascend_util.py
│   │   ├── collect_env.py
│   │   ├── compat_config.py
│   │   ├── contextmanagers.py
│   │   ├── logger.py
│   │   ├── memory.py
│   │   ├── misc.py
│   │   ├── profiling.py
│   │   ├── replace_cfg_vals.py
│   │   ├── rfnext.py
│   │   ├── setup_env.py
│   │   ├── split_batch.py
│   │   ├── util_distribution.py
│   │   ├── util_mixins.py
│   │   └── util_random.py
│   └── version.py
├── projects/
│   ├── configs/
│   │   ├── _base_/
│   │   │   ├── datasets/
│   │   │   │   ├── coco_detection.py
│   │   │   │   ├── coco_instance.py
│   │   │   │   └── coco_panoptic.py
│   │   │   └── default_runtime.py
│   │   ├── focalnet_dino/
│   │   │   ├── focalnet-l-dino_sam-vit-b.py
│   │   │   ├── focalnet-l-dino_sam-vit-h.py
│   │   │   ├── focalnet-l-dino_sam-vit-h_best-in-multi_cascade.py
│   │   │   └── focalnet-l-dino_sam-vit-l.py
│   │   └── hdetr/
│   │       ├── r50-hdetr_sam-vit-b.py
│   │       ├── r50-hdetr_sam-vit-b_best-in-multi.py
│   │       ├── r50-hdetr_sam-vit-b_best-in-multi_cascade.py
│   │       ├── r50-hdetr_sam-vit-b_cascade.py
│   │       ├── r50-hdetr_sam-vit-l.py
│   │       ├── swin-l-hdetr_sam-vit-b.py
│   │       ├── swin-l-hdetr_sam-vit-h.py
│   │       ├── swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py
│   │       ├── swin-l-hdetr_sam-vit-l.py
│   │       ├── swin-t-hdetr_sam-vit-b.py
│   │       └── swin-t-hdetr_sam-vit-l.py
│   └── instance_segment_anything/
│       ├── __init__.py
│       ├── models/
│       │   ├── det_wrapper_instance_sam.py
│       │   ├── det_wrapper_instance_sam_cascade.py
│       │   ├── focalnet_dino/
│       │   │   ├── focalnet_dino_wrapper.py
│       │   │   └── models/
│       │   │       ├── __init__.py
│       │   │       └── dino/
│       │   │           ├── __init__.py
│       │   │           ├── attention.py
│       │   │           ├── backbone.py
│       │   │           ├── convnext.py
│       │   │           ├── deformable_transformer.py
│       │   │           ├── dino.py
│       │   │           ├── dn_components.py
│       │   │           ├── focal.py
│       │   │           ├── matcher.py
│       │   │           ├── position_encoding.py
│       │   │           ├── segmentation.py
│       │   │           ├── swin_transformer.py
│       │   │           ├── transformer_deformable.py
│       │   │           ├── util/
│       │   │           │   ├── __init__.py
│       │   │           │   ├── box_loss.py
│       │   │           │   ├── box_ops.py
│       │   │           │   ├── coco_id2name.json
│       │   │           │   ├── get_param_dicts.py
│       │   │           │   ├── logger.py
│       │   │           │   ├── misc.py
│       │   │           │   ├── plot_utils.py
│       │   │           │   ├── slconfig.py
│       │   │           │   ├── slio.py
│       │   │           │   ├── static_data_path.py
│       │   │           │   ├── time_counter.py
│       │   │           │   ├── utils.py
│       │   │           │   ├── vis_utils.py
│       │   │           │   └── visualizer.py
│       │   │           └── utils.py
│       │   ├── hdetr/
│       │   │   ├── hdetr_wrapper.py
│       │   │   └── models/
│       │   │       ├── __init__.py
│       │   │       ├── backbone.py
│       │   │       ├── deformable_detr.py
│       │   │       ├── deformable_transformer.py
│       │   │       ├── matcher.py
│       │   │       ├── position_encoding.py
│       │   │       ├── segmentation.py
│       │   │       ├── swin_transformer.py
│       │   │       └── util/
│       │   │           ├── __init__.py
│       │   │           ├── box_ops.py
│       │   │           ├── misc.py
│       │   │           └── plot_utils.py
│       │   └── segment_anything/
│       │       ├── __init__.py
│       │       ├── automatic_mask_generator.py
│       │       ├── build_sam.py
│       │       ├── modeling/
│       │       │   ├── __init__.py
│       │       │   ├── common.py
│       │       │   ├── image_encoder.py
│       │       │   ├── mask_decoder.py
│       │       │   ├── prompt_encoder.py
│       │       │   ├── sam.py
│       │       │   └── transformer.py
│       │       ├── predictor.py
│       │       └── utils/
│       │           ├── __init__.py
│       │           ├── amg.py
│       │           ├── onnx.py
│       │           └── transforms.py
│       └── ops/
│           ├── functions/
│           │   ├── __init__.py
│           │   └── ms_deform_attn_func.py
│           ├── make.sh
│           ├── modules/
│           │   ├── __init__.py
│           │   └── ms_deform_attn.py
│           ├── setup.py
│           ├── src/
│           │   ├── cpu/
│           │   │   ├── ms_deform_attn_cpu.cpp
│           │   │   └── ms_deform_attn_cpu.h
│           │   ├── cuda/
│           │   │   ├── ms_deform_attn_cuda.cu
│           │   │   ├── ms_deform_attn_cuda.h
│           │   │   └── ms_deform_im2col_cuda.cuh
│           │   ├── ms_deform_attn.h
│           │   └── vision.cpp
│           └── test.py
├── requirements/
│   ├── albu.txt
│   ├── build.txt
│   ├── docs.txt
│   ├── mminstall.txt
│   ├── optional.txt
│   ├── readthedocs.txt
│   ├── runtime.txt
│   └── tests.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── tools/
    ├── convert_ckpt.py
    ├── dist_test.sh
    └── test.py

================================================
FILE CONTENTS
================================================

================================================
FILE: .gitignore
================================================
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
#  Usually these files are written by a python script from a template
#  before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/en/_build/
docs/zh_cn/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

data/
data
.vscode
.idea
.DS_Store

# custom
*.pkl
*.pkl.json
*.log.json
docs/modelzoo_statistics.md
mmdet/.mim
work_dirs/
ckpt/

# Pytorch
*.pth
*.py~
*.sh~


================================================
FILE: LICENSE
================================================
Copyright 2018-2023 OpenMMLab. All rights reserved.

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!)  The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2018-2023 OpenMMLab.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.


================================================
FILE: README.md
================================================
# Prompt-Segment-Anything
This is an implementation of zero-shot instance segmentation using [Segment Anything](https://github.com/facebookresearch/segment-anything). Thanks to the authors of Segment Anything for their wonderful work! 

This repository is based on [MMDetection](https://github.com/open-mmlab/mmdetection) and includes some code from [H-Deformable-DETR](https://github.com/HDETR/H-Deformable-DETR) and [FocalNet-DINO](https://github.com/FocalNet/FocalNet-DINO).

![example1](assets/example1.jpg)

## News

**2023.04.12** Multimask output mode and cascade prompt mode are available now.

**2023.04.11** Our [demo](https://huggingface.co/spaces/rockeycoss/Prompt-Segment-Anything-Demo) is available now. Please feel free to check it out.

**2023.04.11** [Swin-L+H-Deformable-DETR + SAM](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py)/[FocalNet-L+DINO + SAM](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-h.py) achieve strong COCO instance segmentation results: mask AP=46.8/49.1 by simply prompting SAM with boxes predicted by Swin-L+H-Deformable-DETR/FocalNet-L+DINO. (mask AP=46.5 based on ViTDet)🍺

## Catalog

- [x] Support Swin-L+H-Deformable-DETR+SAM
- [x] Support FocalNet-L+DINO+SAM
- [x] Support R50+H-Deformable-DETR+SAM/Swin-T+H-Deformable-DETR
- [x] Support HuggingFace gradio demo
- [x] Support cascade prompts (box prompt + mask prompt)

## Box-as-Prompt Results

|         Detector         |    SAM    |    multimask output    | Detector's Box AP | Mask AP |                            Config                            |
| :--------------------- | :-------: | :---------------: | :-----: | :----------------------------------------------------------: | ----------------------- |
|  R50+H-Deformable-DETR   | sam-vit-b | :x: |       50.0        |  38.2   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b.py) |
| R50+H-Deformable-DETR | sam-vit-b | :heavy_check_mark: | 50.0 | 39.9 | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi.py) |
|  R50+H-Deformable-DETR   | sam-vit-l | :x: |       50.0        |  41.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-l.py) |
| Swin-T+H-Deformable-DETR | sam-vit-b | :x: |       53.2        |  40.0   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py) |
| Swin-T+H-Deformable-DETR | sam-vit-l | :x: |       53.2        |  43.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-t-hdetr_sam-vit-l.py) |
| Swin-L+H-Deformable-DETR | sam-vit-b | :x: |       58.0        |  42.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py) |
| Swin-L+H-Deformable-DETR | sam-vit-l | :x: |       58.0        |  46.3   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py) |
| Swin-L+H-Deformable-DETR | sam-vit-h | :x: |       58.0        |  46.8   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py) |
|     FocalNet-L+DINO      | sam-vit-b | :x: |       63.2        |  44.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py) |
|     FocalNet-L+DINO      | sam-vit-l | :x: |       63.2        |  48.6   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py) |
|     FocalNet-L+DINO      | sam-vit-h | :x: |       63.2        |  49.1   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h.py) |

## Cascade-Prompt Results

|       Detector        |    SAM    |  multimask output   | Detector's Box AP | Mask AP | Config                                                       |
| :------------------- | :-------: | :----------------: | :---------------: | :-----: | ------------------------------------------------------------ |
| R50+H-Deformable-DETR | sam-vit-b |        :x:         |       50.0        |  38.8   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_cascade.py) |
| R50+H-Deformable-DETR | sam-vit-b | :heavy_check_mark: |       50.0        |  40.5   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/r50-hdetr_sam-vit-b_best-in-multi_cascade.py) |
| Swin-L+H-Deformable-DETR | sam-vit-h | :heavy_check_mark: |       58.0        |  47.3   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py) |
|     FocalNet-L+DINO      | sam-vit-h | :heavy_check_mark: |       63.2        |  49.6   | [config](https://github.com/RockeyCoss/Instance-Segment-Anything/blob/master/projects/configs/hdetr/swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py) |

***Note***

**multimask output**: If multimask output is :heavy_check_mark:, SAM will predict three masks for each prompt, and the segmentation result will be the one with the highest predicted IoU. Otherwise, if multimask output is :x:, SAM will return only one mask for each prompt, which will be used as the segmentation result.

**cascade-prompt**: In the cascade-prompt setting, the segmentation process involves two stages. In the first stage, a coarse mask is predicted with a bounding box prompt. The second stage then utilizes both the bounding box and the coarse mask as prompts to predict the final segmentation result. Note that if multimask output is :heavy_check_mark:, the first stage will predict three coarse masks, and the second stage will use the mask with the highest predicted IoU as the prompt.

## Installation

🍺🍺🍺 Add dockerhub environment

```
docker pull kxqt/prompt-sam-torch1.12-cuda11.6:20230410
nvidia-docker run -it --shm-size=4096m -v {your_path}:{path_in_docker} kxqt/prompt-sam-torch1.12-cuda11.6:20230410
```

We test the models under `python=3.7.10,pytorch=1.10.2,cuda=10.2`. Other versions might be available as well.

1. Clone this repository

```
git clone https://github.com/RockeyCoss/Instance-Segment-Anything
cd Instance-Segment-Anything
```

2. Install PyTorch

```bash
# an example
pip install torch torchvision
```

3. Install MMCV

```
pip install -U openmim
mim install "mmcv-full<2.0.0"
```

4. Install MMDetection's requirements

```
pip install -r requirements.txt
```

5. Compile CUDA operators

```bash
cd projects/instance_segment_anything/ops
python setup.py build install
cd ../../..
```

Please note that the ``mmdet`` package does not need to be installed. If your environment already has the ``mmdet`` package installed, you can run the following command before executing other scripts:

```bash
export PYTHONPATH=$(pwd)
```

## Prepare COCO Dataset

Please refer to [data preparation](https://mmdetection.readthedocs.io/en/latest/user_guides/dataset_prepare.html).

## Prepare Checkpoints

1. Install wget

```
pip install wget
```

2. SAM checkpoints

```bash
mkdir ckpt
cd ckpt
python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth
python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth
python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth
cd ..
```

3. Here are the checkpoints for the detection models. You can download only the checkpoints you need.

```bash
# R50+H-Deformable-DETR
cd ckpt
python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/r50_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o r50_hdetr.pth
cd ..
python tools/convert_ckpt.py ckpt/r50_hdetr.pth ckpt/r50_hdetr.pth

# Swin-T+H-Deformable-DETR
cd ckpt
python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/swin_tiny_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o swin_t_hdetr.pth
cd ..
python tools/convert_ckpt.py ckpt/swin_t_hdetr.pth ckpt/swin_t_hdetr.pth

# Swin-L+H-Deformable-DETR
cd ckpt
python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/decay0.05_drop_path0.5_swin_large_hybrid_branch_lambda1_group6_t1500_n900_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o swin_l_hdetr.pth
cd ..
python tools/convert_ckpt.py ckpt/swin_l_hdetr.pth ckpt/swin_l_hdetr.pth

# FocalNet-L+DINO
cd ckpt
python -m wget https://projects4jw.blob.core.windows.net/focalnet/release/detection/focalnet_large_fl4_o365_finetuned_on_coco.pth -o focalnet_l_dino.pth
cd ..
python tools/convert_ckpt.py ckpt/focalnet_l_dino.pth ckpt/focalnet_l_dino.pth
```

## Run Evaluation

1. Evaluate Metrics

```bash
# single GPU
python tools/test.py path/to/the/config/file --eval segm
# multiple GPUs
bash tools/dist_test.sh path/to/the/config/file num_gpus --eval segm
```

2. Visualize Segmentation Results

```bash
python tools/test.py path/to/the/config/file --show-dir path/to/the/visualization/results
```
## Gradio Demo

We also provide a UI for displaying the segmentation results that is built with gradio. To launch the demo, simply run the following command in a terminal:

```bash
pip install gradio
python app.py
```

This demo is also hosted on HuggingFace [here](https://huggingface.co/spaces/rockeycoss/Prompt-Segment-Anything-Demo).

## More Segmentation Examples

![example2](assets/example2.jpg)
![example3](assets/example3.jpg)
![example4](assets/example4.jpg)
![example5](assets/example5.jpg)

## Citation

**Segment Anything**

```latex
@article{kirillov2023segany,
  title={Segment Anything}, 
  author={Kirillov, Alexander and Mintun, Eric and Ravi, Nikhila and Mao, Hanzi and Rolland, Chloe and Gustafson, Laura and Xiao, Tete and Whitehead, Spencer and Berg, Alexander C. and Lo, Wan-Yen and Doll{\'a}r, Piotr and Girshick, Ross},
  journal={arXiv:2304.02643},
  year={2023}
}
```
**H-Deformable-DETR**

```latex
@article{jia2022detrs,
  title={DETRs with Hybrid Matching},
  author={Jia, Ding and Yuan, Yuhui and He, Haodi and Wu, Xiaopei and Yu, Haojun and Lin, Weihong and Sun, Lei and Zhang, Chao and Hu, Han},
  journal={arXiv preprint arXiv:2207.13080},
  year={2022}
}
```
**Swin Transformer**

```latex
@inproceedings{liu2021Swin,
  title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
  author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
  year={2021}
}
```
**DINO**

```latex
@misc{zhang2022dino,
      title={DINO: DETR with Improved DeNoising Anchor Boxes for End-to-End Object Detection}, 
      author={Hao Zhang and Feng Li and Shilong Liu and Lei Zhang and Hang Su and Jun Zhu and Lionel M. Ni and Heung-Yeung Shum},
      year={2022},
      eprint={2203.03605},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```
**FocalNet**

```latex
@misc{yang2022focalnet,  
  author = {Yang, Jianwei and Li, Chunyuan and Dai, Xiyang and Yuan, Lu and Gao, Jianfeng},
  title = {Focal Modulation Networks},
  publisher = {arXiv},
  year = {2022},
}
```


================================================
FILE: app.py
================================================
import os

# When SPACE_ID is set we are running on a HuggingFace Space: download all
# checkpoints at import time, before the heavy framework imports below.
SPACE_ID = os.getenv('SPACE_ID')
if SPACE_ID is not None:
    # running on huggingface space
    # Fetch the three SAM checkpoints (ViT-B / ViT-L / ViT-H) into ./ckpt.
    os.system(r'mkdir ckpt')
    os.system(
        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_b_01ec64.pth -o ckpt/sam_vit_b_01ec64.pth')
    os.system(
        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_l_0b3195.pth -o ckpt/sam_vit_l_0b3195.pth')
    os.system(
        r'python -m wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth -o ckpt/sam_vit_h_4b8939.pth')

    # Fetch the detector checkpoints (H-Deformable-DETR variants and
    # FocalNet-L+DINO).
    os.system(
        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1'
        r'/r50_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/r50_hdetr.pth')
    os.system(
        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1'
        r'/swin_tiny_hybrid_branch_lambda1_group6_t1500_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/swin_t_hdetr.pth')
    os.system(
        r'python -m wget https://github.com/HDETR/H-Deformable-DETR/releases/download/v0.1/decay0.05_drop_path0'
        r'.5_swin_large_hybrid_branch_lambda1_group6_t1500_n900_dp0_mqs_lft_deformable_detr_plus_iterative_bbox_refinement_plus_plus_two_stage_36eps.pth -o ckpt/swin_l_hdetr.pth')
    os.system(r'python -m wget https://projects4jw.blob.core.windows.net/focalnet/release/detection'
              r'/focalnet_large_fl4_o365_finetuned_on_coco.pth -o ckpt/focalnet_l_dino.pth')

    # Rewrite each detector checkpoint in place via tools/convert_ckpt.py
    # (output path equals input path).
    os.system(r'python tools/convert_ckpt.py ckpt/r50_hdetr.pth ckpt/r50_hdetr.pth')
    os.system(r'python tools/convert_ckpt.py ckpt/swin_t_hdetr.pth ckpt/swin_t_hdetr.pth')
    os.system(r'python tools/convert_ckpt.py ckpt/swin_l_hdetr.pth ckpt/swin_l_hdetr.pth')
    os.system(r'python tools/convert_ckpt.py ckpt/focalnet_l_dino.pth ckpt/focalnet_l_dino.pth')
import warnings
from collections import OrderedDict
from pathlib import Path

import gradio as gr
import numpy as np
import torch

import mmcv
from mmcv import Config
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmcv.utils import IS_CUDA_AVAILABLE, IS_MLU_AVAILABLE

from mmdet.core import get_classes
from mmdet.datasets import (CocoDataset, replace_ImageToTensor)
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector
from mmdet.utils import (compat_cfg, replace_cfg_vals, setup_multi_processes,
                         update_data_root)

# Demo model name (shown in the UI dropdown) -> config file path.
# Entries that are commented out are currently disabled in the demo.
config_dict = OrderedDict([('r50-hdetr_sam-vit-b', 'projects/configs/hdetr/r50-hdetr_sam-vit-b.py'),
                           ('r50-hdetr_sam-vit-l', 'projects/configs/hdetr/r50-hdetr_sam-vit-l.py'),
                           ('swin-t-hdetr_sam-vit-b', 'projects/configs/hdetr/swin-t-hdetr_sam-vit-b.py'),
                           ('swin-t-hdetr_sam-vit-l', 'projects/configs/hdetr/swin-t-hdetr_sam-vit-l.py'),
                           ('swin-l-hdetr_sam-vit-b', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-b.py'),
                           ('swin-l-hdetr_sam-vit-l', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py'),
                           # ('swin-l-hdetr_sam-vit-h', 'projects/configs/hdetr/swin-l-hdetr_sam-vit-l.py'),
                           ('focalnet-l-dino_sam-vit-b', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-b.py'),
                           # ('focalnet-l-dino_sam-vit-l', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-l.py'),
                           # ('focalnet-l-dino_sam-vit-h', 'projects/configs/focalnet_dino/focalnet-l-dino_sam-vit-h.py')
                           ])


def init_demo_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Build a detector for the demo from a config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,
            :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Device to place the model on. Default: 'cuda:0'.
        cfg_options (dict): Options to override some settings in the used
            config.
    Returns:
        nn.Module: The constructed detector, in eval mode.
    """
    if isinstance(config, (str, Path)):
        config = mmcv.Config.fromfile(config)
    if not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)

    # Weights (if any) come from `checkpoint`; clear pretrained/init settings
    # so building the model does not try to load anything else.
    if 'pretrained' in config.model:
        config.model.pretrained = None
    else:
        backbone_cfg = config.model.get('backbone', None)
        if backbone_cfg is not None and 'init_cfg' in backbone_cfg:
            config.model.backbone.init_cfg = None
    config.model.train_cfg = None

    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        meta = load_checkpoint(model, checkpoint, map_location='cpu').get('meta', {})
        if 'CLASSES' in meta:
            model.CLASSES = meta['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')

    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()

    if device == 'npu':
        from mmcv.device.npu import NPUDataParallel
        model = NPUDataParallel(model)
        model.cfg = config

    return model


def inference_demo_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
           Either image files or loaded images.
    Returns:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """
    # Keep the caller's original argument: it is forwarded to the model as
    # `ori_img` below, before `imgs` is normalized to a list.
    ori_img = imgs
    if isinstance(imgs, (list, tuple)):
        is_batch = True
    else:
        imgs = [imgs]
        is_batch = False

    cfg = model.cfg
    device = next(model.parameters()).device  # model device

    if isinstance(imgs[0], np.ndarray):
        # Copy before mutating so in-memory inputs don't alter model.cfg.
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    # NOTE(review): for file-path inputs `cfg` is still model.cfg itself, so
    # this assignment mutates the model's config across calls — confirm
    # replace_ImageToTensor is idempotent.
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # directly add img
            data = dict(img=img)
        else:
            # add information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        # build the data pipeline
        data = test_pipeline(data)
        datas.append(data)

    data = collate(datas, samples_per_gpu=len(imgs))
    # just get the actual data from DataContainer
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(
                m, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'

    # forward the model
    with torch.no_grad():
        results = model(return_loss=False, rescale=True, **data, ori_img=ori_img)

    if not is_batch:
        return results[0]
    else:
        return results


def inference(img, config):
    """Handle one demo request: build the selected model, run it on `img`,
    and return the rendered visualization (or None for empty input).

    Args:
        img (np.ndarray | None): Image from the gradio input widget.
        config (str): Key into ``config_dict`` selecting the model config.
    Returns:
        np.ndarray | None: Visualization image, or None if `img` is None.
    """
    if img is None:
        return None
    print(f"config: {config}")
    config = config_dict[config]
    cfg = Config.fromfile(config)

    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)

    # update data root according to MMDET_DATASETS
    update_data_root(cfg)

    cfg = compat_cfg(cfg)

    # set multi-process settings
    setup_multi_processes(cfg)

    # import modules from plugin/xx, registry will be updated
    if hasattr(cfg, 'plugin'):
        if cfg.plugin:
            import importlib
            if hasattr(cfg, 'plugin_dir'):
                # Turn the plugin directory path into a dotted module path.
                plugin_dir = cfg.plugin_dir
                _module_dir = os.path.dirname(plugin_dir)
                _module_dir = _module_dir.split('/')
                _module_path = _module_dir[0]

                for m in _module_dir[1:]:
                    _module_path = _module_path + '.' + m
                print(_module_path)
                plg_lib = importlib.import_module(_module_path)
            else:
                # import dir is the dirpath for the config file
                _module_dir = os.path.dirname(config)
                _module_dir = _module_dir.split('/')
                _module_path = _module_dir[0]
                for m in _module_dir[1:]:
                    _module_path = _module_path + '.' + m
                # print(_module_path)
                plg_lib = importlib.import_module(_module_path)

    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # Use an accelerator when mmcv reports one, otherwise fall back to CPU.
    if IS_CUDA_AVAILABLE or IS_MLU_AVAILABLE:
        device = "cuda"
    else:
        device = "cpu"
    # NOTE(review): checkpoint is None here — weights are presumably loaded
    # through the config itself; confirm against the project configs.
    model = init_demo_detector(cfg, None, device=device)
    model.CLASSES = CocoDataset.CLASSES

    results = inference_demo_detector(model, img)
    visualize = model.show_result(
        img,
        results,
        bbox_color=CocoDataset.PALETTE,
        text_color=CocoDataset.PALETTE,
        mask_color=CocoDataset.PALETTE,
        show=False,
        out_file=None,
        score_thr=0.3
    )
    # A fresh model is built per request; drop this one before returning.
    del model
    return visualize


# Markdown shown at the top of the gradio demo page.
description = """
#  <center>Prompt Segment Anything (zero-shot instance segmentation demo)</center>
Github link: [Link](https://github.com/RockeyCoss/Prompt-Segment-Anything)
You can select the model you want to use from the "Model" dropdown menu and click "Submit" to segment the image you uploaded to the "Input Image" box.
"""
# On a HuggingFace Space, append a "Duplicate Space" hint.
if SPACE_ID is not None:
    description += f'\n<p>For faster inference without waiting in queue, you may duplicate the space and upgrade to GPU in settings. <a href="https://huggingface.co/spaces/{SPACE_ID}?duplicate=true"><img style="display: inline; margin-top: 0em; margin-bottom: 0em" src="https://bit.ly/3gLdBN6" alt="Duplicate Space" /></a></p>'


def main():
    """Assemble the gradio Blocks UI and launch the demo server."""
    with gr.Blocks() as demo:
        gr.Markdown(description)
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    image_in = gr.Image(type="numpy", label="Input Image")
                    # Model selector, defaulting to the first configured model.
                    model_names = list(config_dict.keys())
                    model_choice = gr.Dropdown(choices=model_names,
                                               value=model_names[0],
                                               label='Model',
                                               multiselect=False)
                    with gr.Row():
                        btn_clear = gr.Button(value="Clear")
                        btn_submit = gr.Button(value="Submit")
                image_out = gr.Image(type="numpy", label="Output")
            # Clickable example inputs rendered below the widgets.
            example_rows = [["./assets/img1.jpg", "r50-hdetr_sam-vit-b"],
                            ["./assets/img2.jpg", "r50-hdetr_sam-vit-b"],
                            ["./assets/img3.jpg", "r50-hdetr_sam-vit-b"],
                            ["./assets/img4.jpg", "r50-hdetr_sam-vit-b"]]
            gr.Examples(examples=example_rows,
                        inputs=[image_in, model_choice],
                        outputs=image_out,
                        fn=inference)

        btn_submit.click(inference,
                         inputs=[image_in, model_choice],
                         outputs=image_out)
        btn_clear.click(lambda: [None, None], None, [image_in, image_out],
                        queue=False)

    demo.queue()
    demo.launch()


# Launch the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()


================================================
FILE: mmdet/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv

from .version import __version__, short_version


def digit_version(version_str):
    """Convert a dotted version string into a list of ints for comparison.

    Numeric components map directly to ints. A release-candidate component
    such as ``'0rc1'`` becomes two entries, ``patch - 1`` followed by the rc
    number, so that e.g. ``'1.3.0rc1'`` compares less than ``'1.3.0'``. Any
    other non-numeric component is silently ignored.

    Args:
        version_str (str): Version string, e.g. ``'1.3.17'`` or ``'1.3.0rc1'``.

    Returns:
        list[int]: A list usable for lexicographic version comparison.
    """
    # Fix: the original used a local list named `digit_version`, shadowing
    # the function itself inside its own body.
    parts = []
    for component in version_str.split('.'):
        if component.isdigit():
            parts.append(int(component))
        elif 'rc' in component:
            patch, rc = component.split('rc')
            parts.append(int(patch) - 1)
            parts.append(int(rc))
    return parts


# Supported mmcv version range (inclusive on both ends).
mmcv_minimum_version = '1.3.17'
mmcv_maximum_version = '1.8.0'
mmcv_version = digit_version(mmcv.__version__)


# Fail fast at import time if the installed mmcv version is out of range.
assert (mmcv_version >= digit_version(mmcv_minimum_version)
        and mmcv_version <= digit_version(mmcv_maximum_version)), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.'

__all__ = ['__version__', 'short_version']


================================================
FILE: mmdet/apis/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .inference import (async_inference_detector, inference_detector,
                        init_detector, show_result_pyplot)
from .test import multi_gpu_test, single_gpu_test
from .train import (get_root_logger, init_random_seed, set_random_seed,
                    train_detector)

__all__ = [
    'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector',
    'async_inference_detector', 'inference_detector', 'show_result_pyplot',
    'multi_gpu_test', 'single_gpu_test', 'init_random_seed'
]


================================================
FILE: mmdet/apis/inference.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from pathlib import Path

import mmcv
import numpy as np
import torch
from mmcv.ops import RoIPool
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint

from mmdet.core import get_classes
from mmdet.datasets import replace_ImageToTensor
from mmdet.datasets.pipelines import Compose
from mmdet.models import build_detector


def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None):
    """Initialize a detector from config file.

    Args:
        config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path,
            :obj:`Path`, or the config object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Device the model is placed on. Default: 'cuda:0'.
        cfg_options (dict): Options to override some settings in the used
            config.

    Returns:
        nn.Module: The constructed detector.
    """
    if isinstance(config, (str, Path)):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    if cfg_options is not None:
        config.merge_from_dict(cfg_options)
    # Weights come from `checkpoint`; drop pretrained/init settings so the
    # build step does not try to load anything on its own.
    if 'pretrained' in config.model:
        config.model.pretrained = None
    elif (config.model.get('backbone', None) is not None
          and 'init_cfg' in config.model.backbone):
        # Fix: guard the `backbone` access — configs without a `backbone` key
        # previously crashed here. Matches the demo's init_demo_detector.
        config.model.backbone.init_cfg = None
    config.model.train_cfg = None
    model = build_detector(config.model, test_cfg=config.get('test_cfg'))
    if checkpoint is not None:
        checkpoint = load_checkpoint(model, checkpoint, map_location='cpu')
        if 'CLASSES' in checkpoint.get('meta', {}):
            model.CLASSES = checkpoint['meta']['CLASSES']
        else:
            warnings.simplefilter('once')
            warnings.warn('Class names are not saved in the checkpoint\'s '
                          'meta data, use COCO classes by default.')
            model.CLASSES = get_classes('coco')
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()

    if device == 'npu':
        from mmcv.device.npu import NPUDataParallel
        model = NPUDataParallel(model)
        model.cfg = config

    return model


class LoadImage:
    """Deprecated.

    A simple pipeline to load image.
    """

    def __call__(self, results):
        """Load the image referenced by ``results`` into the dict.

        Args:
            results (dict): A result dict contains the file name
                of the image to be read.
        Returns:
            dict: ``results`` will be returned containing loaded image.
        """
        warnings.simplefilter('once')
        warnings.warn('`LoadImage` is deprecated and will be removed in '
                      'future releases. You may use `LoadImageFromWebcam` '
                      'from `mmdet.datasets.pipelines.` instead.')
        source = results['img']
        # Only string inputs carry a usable filename; arrays get None.
        filename = source if isinstance(source, str) else None
        results['filename'] = filename
        results['ori_filename'] = filename
        img = mmcv.imread(source)
        results['img'] = img
        results['img_fields'] = ['img']
        results['img_shape'] = img.shape
        results['ori_shape'] = img.shape
        return results


def inference_detector(model, imgs):
    """Inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]):
           Either image files or loaded images.

    Returns:
        If imgs is a list or tuple, the same length list type results
        will be returned, otherwise return the detection results directly.
    """

    # Normalize the input to a list; remember whether the caller passed a
    # batch so the return shape can mirror the input.
    if isinstance(imgs, (list, tuple)):
        is_batch = True
    else:
        imgs = [imgs]
        is_batch = False

    cfg = model.cfg
    device = next(model.parameters()).device  # model device

    if isinstance(imgs[0], np.ndarray):
        # Copy before mutating so in-memory inputs don't alter model.cfg.
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    # NOTE(review): for file-path inputs `cfg` is still model.cfg itself, so
    # this assignment mutates the model's config across calls — confirm
    # replace_ImageToTensor is idempotent.
    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # directly add img
            data = dict(img=img)
        else:
            # add information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        # build the data pipeline
        data = test_pipeline(data)
        datas.append(data)

    data = collate(datas, samples_per_gpu=len(imgs))
    # just get the actual data from DataContainer
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(
                m, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'

    # forward the model
    with torch.no_grad():
        results = model(return_loss=False, rescale=True, **data)

    if not is_batch:
        return results[0]
    else:
        return results


async def async_inference_detector(model, imgs):
    """Async inference image(s) with the detector.

    Args:
        model (nn.Module): The loaded detector.
        imgs (str | ndarray | list[str/ndarray]): Either image files or
            loaded images.

    Returns:
        Awaitable detection results.
    """
    # Always work on a list; unlike inference_detector, the result is
    # returned as-is (no unwrapping for single inputs).
    if not isinstance(imgs, (list, tuple)):
        imgs = [imgs]

    cfg = model.cfg
    device = next(model.parameters()).device  # model device

    if isinstance(imgs[0], np.ndarray):
        # Copy before mutating so in-memory inputs don't alter model.cfg.
        cfg = cfg.copy()
        # set loading pipeline type
        cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam'

    cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
    test_pipeline = Compose(cfg.data.test.pipeline)

    datas = []
    for img in imgs:
        # prepare data
        if isinstance(img, np.ndarray):
            # directly add img
            data = dict(img=img)
        else:
            # add information into dict
            data = dict(img_info=dict(filename=img), img_prefix=None)
        # build the data pipeline
        data = test_pipeline(data)
        datas.append(data)

    data = collate(datas, samples_per_gpu=len(imgs))
    # just get the actual data from DataContainer
    data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']]
    data['img'] = [img.data[0] for img in data['img']]
    if next(model.parameters()).is_cuda:
        # scatter to specified GPU
        data = scatter(data, [device])[0]
    else:
        for m in model.modules():
            assert not isinstance(
                m, RoIPool
            ), 'CPU inference with RoIPool is not supported currently.'

    # We don't restore `torch.is_grad_enabled()` value during concurrent
    # inference since execution can overlap
    torch.set_grad_enabled(False)
    results = await model.aforward_test(rescale=True, **data)
    return results


def show_result_pyplot(model,
                       img,
                       result,
                       score_thr=0.3,
                       title='result',
                       wait_time=0,
                       palette=None,
                       out_file=None):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        title (str): Title of the pyplot figure.
        wait_time (float): Value of waitKey param. Default: 0.
        palette (str or tuple(int) or :obj:`Color`): Color.
            The tuple of color should be in BGR order.
        out_file (str or None): The path to write the image.
            Default: None.
    """
    # Collect all drawing options once, then delegate to the detector's
    # own `show_result` implementation.
    draw_kwargs = dict(
        score_thr=score_thr,
        show=True,
        wait_time=wait_time,
        win_name=title,
        bbox_color=palette,
        text_color=(200, 200, 200),
        mask_color=palette,
        out_file=out_file)
    # Unwrap a (Distributed)DataParallel wrapper when present.
    detector = getattr(model, 'module', model)
    detector.show_result(img, result, **draw_kwargs)


================================================
FILE: mmdet/apis/test.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import time

import mmcv
import torch
import torch.distributed as dist
from mmcv.image import tensor2imgs
from mmcv.runner import get_dist_info

from mmdet.core import encode_mask_results


def single_gpu_test(model,
                    data_loader,
                    show=False,
                    out_dir=None,
                    show_score_thr=0.3):
    """Test a model on a dataset with a single gpu.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.DataLoader): Pytorch data loader.
        show (bool): Whether to show painted results on screen.
            Default: False.
        out_dir (str, optional): Directory where painted images are saved.
            Default: None.
        show_score_thr (float): Score threshold for visualizing detections.
            Default: 0.3.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    PALETTE = getattr(dataset, 'PALETTE', None)
    prog_bar = mmcv.ProgressBar(len(dataset))
    # Fixed: the outer enumerate() index was unused and was shadowed by the
    # inner visualization loop's `i`; iterate the loader directly instead.
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)

        batch_size = len(result)
        if show or out_dir:
            if batch_size == 1 and isinstance(data['img'][0], torch.Tensor):
                img_tensor = data['img'][0]
            else:
                img_tensor = data['img'][0].data[0]
            img_metas = data['img_metas'][0].data[0]
            imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg'])
            assert len(imgs) == len(img_metas)

            for i, (img, img_meta) in enumerate(zip(imgs, img_metas)):
                h, w, _ = img_meta['img_shape']
                img_show = img[:h, :w, :]

                ori_h, ori_w = img_meta['ori_shape'][:-1]
                img_show = mmcv.imresize(img_show, (ori_w, ori_h))

                if out_dir:
                    out_file = osp.join(out_dir, img_meta['ori_filename'])
                else:
                    out_file = None

                model.module.show_result(
                    img_show,
                    result[i],
                    bbox_color=PALETTE,
                    text_color=PALETTE,
                    mask_color=PALETTE,
                    show=show,
                    out_file=out_file,
                    score_thr=show_score_thr)

        # encode mask results
        if isinstance(result[0], tuple):
            result = [(bbox_results, encode_mask_results(mask_results))
                      for bbox_results, mask_results in result]
        # This logic is only used in panoptic segmentation test.
        elif isinstance(result[0], dict) and 'ins_results' in result[0]:
            for j in range(len(result)):
                bbox_results, mask_results = result[j]['ins_results']
                result[j]['ins_results'] = (bbox_results,
                                            encode_mask_results(mask_results))

        results.extend(result)

        for _ in range(batch_size):
            prog_bar.update()
    return results


def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    """Test model with multiple gpus.

    This method tests model with multiple gpus and collects the results
    under two different modes: gpu and cpu modes. By setting 'gpu_collect=True'
    it encodes results to gpu tensors and use gpu communication for results
    collection. On cpu mode it saves the results on different gpus to 'tmpdir'
    and collects them by the rank 0 worker.

    Args:
        model (nn.Module): Model to be tested.
        data_loader (nn.DataLoader): Pytorch data loader.
        tmpdir (str): Path of directory to save the temporary results from
            different gpus under cpu mode.
        gpu_collect (bool): Option to use either gpu or cpu to collect results.

    Returns:
        list: The prediction results.
    """
    model.eval()
    results = []
    dataset = data_loader.dataset
    rank, world_size = get_dist_info()
    if rank == 0:
        prog_bar = mmcv.ProgressBar(len(dataset))
    time.sleep(2)  # This line can prevent deadlock problem in some cases.
    # Fixed: the enumerate() index was never used; iterate directly.
    for data in data_loader:
        with torch.no_grad():
            result = model(return_loss=False, rescale=True, **data)
            # encode mask results
            if isinstance(result[0], tuple):
                result = [(bbox_results, encode_mask_results(mask_results))
                          for bbox_results, mask_results in result]
            # This logic is only used in panoptic segmentation test.
            elif isinstance(result[0], dict) and 'ins_results' in result[0]:
                for j in range(len(result)):
                    bbox_results, mask_results = result[j]['ins_results']
                    result[j]['ins_results'] = (
                        bbox_results, encode_mask_results(mask_results))

        results.extend(result)

        if rank == 0:
            batch_size = len(result)
            # every rank processes `batch_size` samples per step, so the
            # progress bar advances by batch_size * world_size
            for _ in range(batch_size * world_size):
                prog_bar.update()

    # collect results from all ranks
    if gpu_collect:
        results = collect_results_gpu(results, len(dataset))
    else:
        results = collect_results_cpu(results, len(dataset), tmpdir)
    return results


def collect_results_cpu(result_part, size, tmpdir=None):
    """Collect results from all ranks through a shared temp directory.

    Each rank dumps its partial results into ``tmpdir``; after a barrier,
    rank 0 loads every part, interleaves them back into dataset order and
    removes the directory.

    Args:
        result_part (list): Partial results from this rank.
        size (int): Total number of dataset samples; used to drop padded
            duplicates added by the distributed sampler.
        tmpdir (str, optional): Directory for the temporary pickles. If
            None, rank 0 creates one and broadcasts its path to the other
            ranks through a fixed-size uint8 CUDA tensor.

    Returns:
        list | None: Ordered results on rank 0; None on all other ranks.
    """
    rank, world_size = get_dist_info()
    # create a tmp dir if it is not specified
    if tmpdir is None:
        # the broadcast buffer has a fixed length, so the path is padded
        MAX_LEN = 512
        # 32 is whitespace
        dir_tensor = torch.full((MAX_LEN, ),
                                32,
                                dtype=torch.uint8,
                                device='cuda')
        if rank == 0:
            mmcv.mkdir_or_exist('.dist_test')
            tmpdir = tempfile.mkdtemp(dir='.dist_test')
            # encode the path as bytes so it can ride in the uint8 tensor
            tmpdir = torch.tensor(
                bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
            dir_tensor[:len(tmpdir)] = tmpdir
        dist.broadcast(dir_tensor, 0)
        # strip the whitespace padding to recover the path string
        tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
    else:
        mmcv.mkdir_or_exist(tmpdir)
    # dump the part result to the dir
    mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl'))
    # wait until every rank has written its part before rank 0 reads
    dist.barrier()
    # collect all parts
    if rank != 0:
        return None
    else:
        # load results of all parts from tmp dir
        part_list = []
        for i in range(world_size):
            part_file = osp.join(tmpdir, f'part_{i}.pkl')
            part_list.append(mmcv.load(part_file))
        # sort the results
        # zip interleaves the parts: sample i of rank r ends up at position
        # i * world_size + r, matching the sampler's round-robin split
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        # remove tmp dir
        shutil.rmtree(tmpdir)
        return ordered_results


def collect_results_gpu(result_part, size):
    """Collect results from all ranks using GPU communication.

    Each rank pickles its partial results into a uint8 CUDA tensor; the
    tensors are padded to a common length and all-gathered. Rank 0
    unpickles every part and interleaves them back into dataset order.

    Args:
        result_part (list): Partial results from this rank.
        size (int): Total number of dataset samples; used to drop padded
            duplicates added by the distributed sampler.

    Returns:
        list | None: Ordered results on rank 0; implicitly None on all
            other ranks.
    """
    rank, world_size = get_dist_info()
    # dump result part to tensor with pickle
    part_tensor = torch.tensor(
        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
    # gather all result part tensor shape
    shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
    shape_list = [shape_tensor.clone() for _ in range(world_size)]
    dist.all_gather(shape_list, shape_tensor)
    # padding result part tensor to max length
    # (all_gather requires identical tensor shapes across ranks)
    shape_max = torch.tensor(shape_list).max()
    part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
    part_send[:shape_tensor[0]] = part_tensor
    part_recv_list = [
        part_tensor.new_zeros(shape_max) for _ in range(world_size)
    ]
    # gather all result part
    dist.all_gather(part_recv_list, part_send)

    if rank == 0:
        part_list = []
        for recv, shape in zip(part_recv_list, shape_list):
            # trim the zero padding before unpickling each part
            part_list.append(
                pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
        # sort the results
        # zip interleaves the parts: sample i of rank r ends up at position
        # i * world_size + r, matching the sampler's round-robin split
        ordered_results = []
        for res in zip(*part_list):
            ordered_results.extend(list(res))
        # the dataloader may pad some samples
        ordered_results = ordered_results[:size]
        return ordered_results


================================================
FILE: mmdet/apis/train.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random

import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner,
                         Fp16OptimizerHook, OptimizerHook, build_runner,
                         get_dist_info)

from mmdet.core import DistEvalHook, EvalHook, build_optimizer
from mmdet.datasets import (build_dataloader, build_dataset,
                            replace_ImageToTensor)
from mmdet.utils import (build_ddp, build_dp, compat_cfg,
                         find_latest_checkpoint, get_root_logger)


def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    # An explicitly supplied seed always wins.
    if seed is not None:
        return seed

    # Make sure all ranks share the same random seed to prevent
    # some potential bugs. Please refer to
    # https://github.com/open-mmlab/mmdetection/issues/6339
    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # Rank 0 draws the seed; every other rank receives it via broadcast.
    payload = seed if rank == 0 else 0
    seed_tensor = torch.tensor(payload, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()


def set_random_seed(seed, deterministic=False):
    """Seed every relevant RNG (Python, NumPy, and Torch CPU/GPU).

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # Apply the same seed to each generator in turn.
    for seeder in (random.seed, np.random.seed, torch.manual_seed,
                   torch.cuda.manual_seed_all):
        seeder(seed)
    if deterministic:
        # Trade speed for reproducible cuDNN kernel selection.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


def auto_scale_lr(cfg, distributed, logger):
    """Automatically scaling LR according to GPU number and sample per GPU.

    Applies the linear scaling rule (https://arxiv.org/abs/1706.02677):
    the LR is multiplied by the ratio of the actual total batch size to
    ``cfg.auto_scale_lr.base_batch_size``. Mutates ``cfg.optimizer.lr``
    in place when scaling applies.

    Args:
        cfg (config): Training config.
        distributed (bool): Using distributed or not.
        logger (logging.Logger): Logger.
    """
    # Get flag from config
    if ('auto_scale_lr' not in cfg) or \
            (not cfg.auto_scale_lr.get('enable', False)):
        logger.info('Automatic scaling of learning rate (LR)'
                    ' has been disabled.')
        return

    # Get base batch size from config; without it there is nothing to
    # scale against, so bail out silently (matches prior behavior).
    base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None)
    if base_batch_size is None:
        return

    # Get gpu number
    if distributed:
        _, world_size = get_dist_info()
        # Fixed: was ``len(range(world_size))`` — a needless range
        # construction that equals the world size itself.
        num_gpus = world_size
    else:
        num_gpus = len(cfg.gpu_ids)

    # calculate the batch size
    samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu
    batch_size = num_gpus * samples_per_gpu
    logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} '
                f'samples per GPU. The total batch size is {batch_size}.')

    if batch_size != base_batch_size:
        # scale LR with
        # [linear scaling rule](https://arxiv.org/abs/1706.02677)
        scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr
        logger.info('LR has been automatically scaled '
                    f'from {cfg.optimizer.lr} to {scaled_lr}')
        cfg.optimizer.lr = scaled_lr
    else:
        logger.info('The batch size match the '
                    f'base batch size: {base_batch_size}, '
                    f'will not scaling the LR ({cfg.optimizer.lr}).')


def train_detector(model,
                   dataset,
                   cfg,
                   distributed=False,
                   validate=False,
                   timestamp=None,
                   meta=None):
    """Train a detector with the mmcv runner.

    Builds dataloaders, wraps the model for (distributed) data parallel,
    creates the optimizer and runner, registers training (and optionally
    evaluation) hooks, resumes or loads a checkpoint, and starts the run.

    Args:
        model (nn.Module): The detector to train.
        dataset (Dataset | list[Dataset]): Training dataset(s); one
            dataloader is built per dataset.
        cfg (Config): Full training config.
        distributed (bool): Whether to train with DistributedDataParallel.
            Default: False.
        validate (bool): Whether to register an evaluation hook.
            Default: False.
        timestamp (str, optional): Timestamp used to name log files.
        meta (dict, optional): Metadata to be recorded by the runner.
    """

    cfg = compat_cfg(cfg)
    logger = get_root_logger(log_level=cfg.log_level)

    # prepare data loaders
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]

    runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[
        'type']

    train_dataloader_default_args = dict(
        samples_per_gpu=2,
        workers_per_gpu=2,
        # `num_gpus` will be ignored if distributed
        num_gpus=len(cfg.gpu_ids),
        dist=distributed,
        seed=cfg.seed,
        runner_type=runner_type,
        persistent_workers=False)

    # config values override the defaults above
    train_loader_cfg = {
        **train_dataloader_default_args,
        **cfg.data.get('train_dataloader', {})
    }

    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]

    # put model on gpus
    if distributed:
        find_unused_parameters = cfg.get('find_unused_parameters', False)
        # Sets the `find_unused_parameters` parameter in
        # torch.nn.parallel.DistributedDataParallel
        model = build_ddp(
            model,
            cfg.device,
            device_ids=[int(os.environ['LOCAL_RANK'])],
            broadcast_buffers=False,
            find_unused_parameters=find_unused_parameters)
    else:
        model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)

    # build optimizer
    # LR scaling must run before the optimizer is built so the scaled
    # value from cfg.optimizer.lr is picked up
    auto_scale_lr(cfg, distributed, logger)
    optimizer = build_optimizer(model, cfg.optimizer)

    runner = build_runner(
        cfg.runner,
        default_args=dict(
            model=model,
            optimizer=optimizer,
            work_dir=cfg.work_dir,
            logger=logger,
            meta=meta))

    # an ugly workaround to make .log and .log.json filenames the same
    runner.timestamp = timestamp

    # fp16 setting
    # NPU devices default to dynamic loss scaling when fp16 is not set
    fp16_cfg = cfg.get('fp16', None)
    if fp16_cfg is None and cfg.get('device', None) == 'npu':
        fp16_cfg = dict(loss_scale='dynamic')
    if fp16_cfg is not None:
        optimizer_config = Fp16OptimizerHook(
            **cfg.optimizer_config, **fp16_cfg, distributed=distributed)
    elif distributed and 'type' not in cfg.optimizer_config:
        optimizer_config = OptimizerHook(**cfg.optimizer_config)
    else:
        optimizer_config = cfg.optimizer_config

    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        optimizer_config,
        cfg.checkpoint_config,
        cfg.log_config,
        cfg.get('momentum_config', None),
        custom_hooks_config=cfg.get('custom_hooks', None))

    if distributed:
        # reseed the sampler each epoch so shuffling differs across epochs
        if isinstance(runner, EpochBasedRunner):
            runner.register_hook(DistSamplerSeedHook())

    # register eval hooks
    if validate:
        val_dataloader_default_args = dict(
            samples_per_gpu=1,
            workers_per_gpu=2,
            dist=distributed,
            shuffle=False,
            persistent_workers=False)

        val_dataloader_args = {
            **val_dataloader_default_args,
            **cfg.data.get('val_dataloader', {})
        }
        # Support batch_size > 1 in validation

        if val_dataloader_args['samples_per_gpu'] > 1:
            # Replace 'ImageToTensor' to 'DefaultFormatBundle'
            cfg.data.val.pipeline = replace_ImageToTensor(
                cfg.data.val.pipeline)
        val_dataset = build_dataset(cfg.data.val, dict(test_mode=True))

        val_dataloader = build_dataloader(val_dataset, **val_dataloader_args)
        eval_cfg = cfg.get('evaluation', {})
        eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner'
        eval_hook = DistEvalHook if distributed else EvalHook
        # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the
        # priority of IterTimerHook has been modified from 'NORMAL' to 'LOW'.
        runner.register_hook(
            eval_hook(val_dataloader, **eval_cfg), priority='LOW')

    # auto-resume: fall back to the latest checkpoint in work_dir
    resume_from = None
    if cfg.resume_from is None and cfg.get('auto_resume'):
        resume_from = find_latest_checkpoint(cfg.work_dir)
    if resume_from is not None:
        cfg.resume_from = resume_from

    # resume (restores optimizer/iteration) takes precedence over load
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow)


================================================
FILE: mmdet/core/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor import *  # noqa: F401, F403
from .bbox import *  # noqa: F401, F403
from .data_structures import *  # noqa: F401, F403
from .evaluation import *  # noqa: F401, F403
from .hook import *  # noqa: F401, F403
from .mask import *  # noqa: F401, F403
from .optimizers import *  # noqa: F401, F403
from .post_processing import *  # noqa: F401, F403
from .utils import *  # noqa: F401, F403


================================================
FILE: mmdet/core/anchor/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,
                               YOLOAnchorGenerator)
from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS,
                      build_anchor_generator, build_prior_generator)
from .point_generator import MlvlPointGenerator, PointGenerator
from .utils import anchor_inside_flags, calc_region, images_to_levels

# Public API of mmdet.core.anchor.
__all__ = [
    'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags',
    'PointGenerator', 'images_to_levels', 'calc_region',
    'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator',
    'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator'
]


================================================
FILE: mmdet/core/anchor/anchor_generator.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import mmcv
import numpy as np
import torch
from torch.nn.modules.utils import _pair

from .builder import PRIOR_GENERATORS


@PRIOR_GENERATORS.register_module()
class AnchorGenerator:
    """Standard anchor generator for 2D anchor-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int] | None): Anchor scales for anchors in a single level.
            It cannot be set at the same time if `octave_base_scale` and
            `scales_per_octave` are set.
        base_sizes (list[int] | None): The basic sizes
            of anchors in multiple levels.
            If None is given, strides will be used as base_sizes.
            (If strides are non square, the shortest stride is taken.)
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int): The base scale of octave.
        scales_per_octave (int): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float, float]] | None): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of tuple of
            float is given, they will be used to shift the centers of anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0.

    Examples:
        >>> from mmdet.core import AnchorGenerator
        >>> self = AnchorGenerator([16], [1.], [1.], [9])
        >>> all_anchors = self.grid_priors([(2, 2)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],
                [11.5000, -4.5000, 20.5000,  4.5000],
                [-4.5000, 11.5000,  4.5000, 20.5000],
                [11.5000, 11.5000, 20.5000, 20.5000]])]
        >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18])
        >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu')
        >>> print(all_anchors)
        [tensor([[-4.5000, -4.5000,  4.5000,  4.5000],
                [11.5000, -4.5000, 20.5000,  4.5000],
                [-4.5000, 11.5000,  4.5000, 20.5000],
                [11.5000, 11.5000, 20.5000, 20.5000]]), \
        tensor([[-9., -9., 9., 9.]])]
    """

    def __init__(self,
                 strides,
                 ratios,
                 scales=None,
                 base_sizes=None,
                 scale_major=True,
                 octave_base_scale=None,
                 scales_per_octave=None,
                 centers=None,
                 center_offset=0.):
        # check center and center_offset
        # explicit per-level centers and a nonzero center_offset are
        # mutually exclusive ways of positioning anchors
        if center_offset != 0:
            assert centers is None, 'center cannot be set when center_offset' \
                                    f'!=0, {centers} is given.'
        if not (0 <= center_offset <= 1):
            raise ValueError('center_offset should be in range [0, 1], '
                             f'{center_offset} is given.')
        if centers is not None:
            assert len(centers) == len(strides), \
                'The number of strides should be the same as centers, got ' \
                f'{strides} and {centers}'

        # calculate base sizes of anchors
        # strides are normalized to (w, h) pairs; when base_sizes is not
        # given, the shorter side of each stride serves as the base size
        self.strides = [_pair(stride) for stride in strides]
        self.base_sizes = [min(stride) for stride in self.strides
                           ] if base_sizes is None else base_sizes
        assert len(self.base_sizes) == len(self.strides), \
            'The number of strides should be the same as base sizes, got ' \
            f'{self.strides} and {self.base_sizes}'

        # calculate scales of anchors
        # exactly one of `scales` or the (octave_base_scale,
        # scales_per_octave) pair must be provided (xor)
        assert ((octave_base_scale is not None
                 and scales_per_octave is not None) ^ (scales is not None)), \
            'scales and octave_base_scale with scales_per_octave cannot' \
            ' be set at the same time'
        if scales is not None:
            self.scales = torch.Tensor(scales)
        elif octave_base_scale is not None and scales_per_octave is not None:
            # scales are spaced evenly in log2 space within one octave
            octave_scales = np.array(
                [2**(i / scales_per_octave) for i in range(scales_per_octave)])
            scales = octave_scales * octave_base_scale
            self.scales = torch.Tensor(scales)
        else:
            raise ValueError('Either scales or octave_base_scale with '
                             'scales_per_octave should be set')

        self.octave_base_scale = octave_base_scale
        self.scales_per_octave = scales_per_octave
        self.ratios = torch.Tensor(ratios)
        self.scale_major = scale_major
        self.centers = centers
        self.center_offset = center_offset
        # base anchors are precomputed once per feature level
        self.base_anchors = self.gen_base_anchors()

    @property
    def num_base_anchors(self):
        """list[int]: total number of base anchors in a feature grid"""
        # kept as a backward-compatible alias of ``num_base_priors``
        return self.num_base_priors

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (anchors) at a point
        on the feature grid"""
        # one entry per feature level, read off the precomputed base anchors
        return [base_anchors.size(0) for base_anchors in self.base_anchors]

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        # there is exactly one stride per feature level
        return len(self.strides)

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            center = None
            if self.centers is not None:
                center = self.centers[i]
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(
                    base_size,
                    scales=self.scales,
                    ratios=self.ratios,
                    center=center))
        return multi_level_base_anchors

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between between the height
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature maps.
        """
        w = base_size
        h = base_size
        if center is None:
            x_center = self.center_offset * w
            y_center = self.center_offset * h
        else:
            x_center, y_center = center

        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (w * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (h * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (w * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (h * scales[:, None] * h_ratios[None, :]).view(-1)

        # use float anchor and the anchor's center is aligned with the
        # pixel center
        base_anchors = [
            x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws,
            y_center + 0.5 * hs
        ]
        base_anchors = torch.stack(base_anchors, dim=-1)

        return base_anchors

    def _meshgrid(self, x, y, row_major=True):
        """Generate mesh grid of x and y.

        Args:
            x (torch.Tensor): Grids of x dimension.
            y (torch.Tensor): Grids of y dimension.
            row_major (bool, optional): Whether to return y grids first.
                Defaults to True.

        Returns:
            tuple[torch.Tensor]: The mesh grids of x and y.
        """
        # use shape instead of len to keep tracing while exporting to onnx
        xx = x.repeat(y.shape[0])
        yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1)
        if row_major:
            return xx, yy
        else:
            return yy, xx

    def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):
        """Generate grid anchors in multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels.
            dtype (:obj:`torch.dtype`): Dtype of priors.
                Default: torch.float32.
            device (str): The device where the anchors will be put on.

        Return:
            list[torch.Tensor]: Anchors in multiple feature levels. \
                The sizes of each tensor should be [N, 4], where \
                N = width * height * num_base_anchors, width and height \
                are the sizes of the corresponding feature level, \
                num_base_anchors is the number of anchors for that level.
        """
        assert self.num_levels == len(featmap_sizes)
        multi_level_anchors = []
        for i in range(self.num_levels):
            anchors = self.single_level_grid_priors(
                featmap_sizes[i], level_idx=i, dtype=dtype, device=device)
            multi_level_anchors.append(anchors)
        return multi_level_anchors

    def single_level_grid_priors(self,
                                 featmap_size,
                                 level_idx,
                                 dtype=torch.float32,
                                 device='cuda'):
        """Generate grid anchors of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int]): Size of the feature maps.
            level_idx (int): The index of corresponding feature map level.
            dtype (obj:`torch.dtype`): Date type of points.Defaults to
                ``torch.float32``.
            device (str, optional): The device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps.
        """

        base_anchors = self.base_anchors[level_idx].to(device).to(dtype)
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        # First create Range with the default dtype, than convert to
        # target `dtype` for onnx exporting.
        shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w
        shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h

        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1)
        # first feat_w elements correspond to the first row of shifts
        # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get
        # shifted anchors (K, A, 4), reshape to (K*A, 4)

        all_anchors = base_anchors[None, :, :] + shifts[:, None, :]
        all_anchors = all_anchors.view(-1, 4)
        # first A rows correspond to A anchors of (0, 0) in feature map,
        # then (0, 1), (0, 2), ...
        return all_anchors

    def sparse_priors(self,
                      prior_idxs,
                      featmap_size,
                      level_idx,
                      dtype=torch.float32,
                      device='cuda'):
        """Generate anchors only at the requested flattened prior indices.

        Args:
            prior_idxs (Tensor): Indices of the wanted anchors within the
                flattened ``h * w * num_base_anchors`` grid of this level.
            featmap_size (tuple[int]): feature map size arrange as (h, w).
            level_idx (int): The level index of corresponding feature
                map.
            dtype (obj:`torch.dtype`): Date type of points.Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points is
                located.
        Returns:
            Tensor: Anchor with shape (N, 4), N should be equal to
                the length of ``prior_idxs``.
        """
        feat_h, feat_w = featmap_size
        num_base = self.num_base_anchors[level_idx]
        stride_w, stride_h = self.strides[level_idx]
        # A flattened index encodes (y, x, base-anchor) in row-major order:
        # idx = (y * w + x) * num_base + base_anchor_id.
        base_anchor_id = prior_idxs % num_base
        xs = (prior_idxs // num_base) % feat_w * stride_w
        ys = (prior_idxs // num_base // feat_w) % feat_h * stride_h
        offsets = torch.stack([xs, ys, xs, ys], 1).to(dtype).to(device)
        return offsets + self.base_anchors[level_idx][base_anchor_id, :].to(
            device)

    def grid_anchors(self, featmap_sizes, device='cuda'):
        """Generate grid anchors in multiple feature levels.

        Deprecated: kept only for backward compatibility with callers that
        have not migrated to ``grid_priors`` yet.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels.
            device (str): Device where the anchors will be put on.

        Return:
            list[torch.Tensor]: Anchors in multiple feature levels. \
                The sizes of each tensor should be [N, 4], where \
                N = width * height * num_base_anchors, width and height \
                are the sizes of the corresponding feature level, \
                num_base_anchors is the number of anchors for that level.
        """
        warnings.warn('``grid_anchors`` would be deprecated soon. '
                      'Please use ``grid_priors`` ')

        assert self.num_levels == len(featmap_sizes)
        return [
            self.single_level_grid_anchors(
                self.base_anchors[lvl].to(device),
                featmap_sizes[lvl],
                self.strides[lvl],
                device=device) for lvl in range(self.num_levels)
        ]

    def single_level_grid_anchors(self,
                                  base_anchors,
                                  featmap_size,
                                  stride=(16, 16),
                                  device='cuda'):
        """Generate grid anchors of a single level.

        Deprecated: use ``single_level_grid_priors`` instead. Usually
        called by method ``self.grid_anchors``.

        Args:
            base_anchors (torch.Tensor): The base anchors of a feature grid.
            featmap_size (tuple[int]): Size of the feature maps.
            stride (tuple[int], optional): Stride of the feature map in order
                (w, h). Defaults to (16, 16).
            device (str, optional): Device the tensor will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: Anchors in the overall feature maps.
        """
        warnings.warn(
            '``single_level_grid_anchors`` would be deprecated soon. '
            'Please use ``single_level_grid_priors`` ')

        # featmap_size may be a Tensor rather than int so the graph can be
        # exported to ONNX correctly.
        feat_h, feat_w = featmap_size
        xs = torch.arange(0, feat_w, device=device) * stride[0]
        ys = torch.arange(0, feat_h, device=device) * stride[1]
        grid_xx, grid_yy = self._meshgrid(xs, ys)
        # The first feat_w entries correspond to the first row of shifts.
        shifts = torch.stack([grid_xx, grid_yy, grid_xx, grid_yy],
                             dim=-1).type_as(base_anchors)
        # Add A base anchors (1, A, 4) to K shifts (K, 1, 4), producing
        # (K, A, 4), then flatten to (K*A, 4): the first A rows are the
        # anchors of cell (0, 0), then (0, 1), (0, 2), ...
        anchors = (base_anchors[None, :, :] + shifts[:, None, :]).view(-1, 4)
        return anchors

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of anchors in multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in
                multiple feature levels.
            pad_shape (tuple): The padded shape of the image.
            device (str): Device where the anchors will be put on.

        Return:
            list(torch.Tensor): Valid flags of anchors in multiple levels.
        """
        assert self.num_levels == len(featmap_sizes)
        img_h, img_w = pad_shape[:2]
        multi_level_flags = []
        for lvl in range(self.num_levels):
            stride_w, stride_h = self.strides[lvl]
            feat_h, feat_w = featmap_sizes[lvl]
            # Cells whose stride-sized receptive field starts inside the
            # padded image are valid; everything beyond is padding only.
            valid_h = min(int(np.ceil(img_h / stride_h)), feat_h)
            valid_w = min(int(np.ceil(img_w / stride_w)), feat_w)
            multi_level_flags.append(
                self.single_level_valid_flags((feat_h, feat_w),
                                              (valid_h, valid_w),
                                              self.num_base_anchors[lvl],
                                              device=device))
        return multi_level_flags

    def single_level_valid_flags(self,
                                 featmap_size,
                                 valid_size,
                                 num_base_anchors,
                                 device='cuda'):
        """Generate the valid flags of anchor in a single feature map.

        Args:
            featmap_size (tuple[int]): The size of feature maps, arrange
                as (h, w).
            valid_size (tuple[int]): The valid size of the feature maps.
            num_base_anchors (int): The number of base anchors.
            device (str, optional): Device where the flags will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each anchor in a single level \
                feature map.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        # A column/row is valid when its index falls inside the valid size.
        valid_x = torch.arange(feat_w, device=device) < valid_w
        valid_y = torch.arange(feat_h, device=device) < valid_h
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        valid = valid_xx & valid_yy
        # Replicate the per-cell flag for every base anchor of that cell.
        return valid.unsqueeze(1).expand(valid.size(0),
                                         num_base_anchors).reshape(-1)

    def __repr__(self):
        """str: a string that describes the module"""
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}strides={self.strides},\n'
        repr_str += f'{indent_str}ratios={self.ratios},\n'
        repr_str += f'{indent_str}scales={self.scales},\n'
        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
        repr_str += f'{indent_str}octave_base_scale='
        repr_str += f'{self.octave_base_scale},\n'
        repr_str += f'{indent_str}scales_per_octave='
        repr_str += f'{self.scales_per_octave},\n'
        # Fix: a comma was missing after num_levels, which glued it to the
        # following centers entry in the printed representation.
        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
        repr_str += f'{indent_str}centers={self.centers},\n'
        repr_str += f'{indent_str}center_offset={self.center_offset})'
        return repr_str


@PRIOR_GENERATORS.register_module()
class SSDAnchorGenerator(AnchorGenerator):
    """Anchor generator for SSD.

    Args:
        strides (list[int]  | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        min_sizes (list[float]): The list of minimum anchor sizes on each
            level.
        max_sizes (list[float]): The list of maximum anchor sizes on each
            level.
        basesize_ratio_range (tuple(float)): Ratio range of anchors. Being
            used when not setting min_sizes and max_sizes.
        input_size (int): Size of feature map, 300 for SSD300, 512 for
            SSD512. Being used when not setting min_sizes and max_sizes.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. It is always set to be False in SSD.
    """

    def __init__(self,
                 strides,
                 ratios,
                 min_sizes=None,
                 max_sizes=None,
                 basesize_ratio_range=(0.15, 0.9),
                 input_size=300,
                 scale_major=True):
        assert len(strides) == len(ratios)
        # min_sizes and max_sizes must be given together or omitted together.
        assert not (min_sizes is None) ^ (max_sizes is None)
        self.strides = [_pair(stride) for stride in strides]
        self.centers = [(stride[0] / 2., stride[1] / 2.)
                        for stride in self.strides]

        if min_sizes is None and max_sizes is None:
            # use hard code to generate SSD anchors
            self.input_size = input_size
            assert mmcv.is_tuple_of(basesize_ratio_range, float)
            self.basesize_ratio_range = basesize_ratio_range
            # calculate anchor ratios and sizes
            min_ratio, max_ratio = basesize_ratio_range
            min_ratio = int(min_ratio * 100)
            max_ratio = int(max_ratio * 100)
            # Evenly split the ratio range across all levels except the
            # first; the first level's sizes are inserted below by the
            # hard-coded per-dataset rules.
            step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2))
            min_sizes = []
            max_sizes = []
            for ratio in range(int(min_ratio), int(max_ratio) + 1, step):
                min_sizes.append(int(self.input_size * ratio / 100))
                max_sizes.append(int(self.input_size * (ratio + step) / 100))
            if self.input_size == 300:
                if basesize_ratio_range[0] == 0.15:  # SSD300 COCO
                    min_sizes.insert(0, int(self.input_size * 7 / 100))
                    max_sizes.insert(0, int(self.input_size * 15 / 100))
                elif basesize_ratio_range[0] == 0.2:  # SSD300 VOC
                    min_sizes.insert(0, int(self.input_size * 10 / 100))
                    max_sizes.insert(0, int(self.input_size * 20 / 100))
                else:
                    raise ValueError(
                        'basesize_ratio_range[0] should be either 0.15'
                        'or 0.2 when input_size is 300, got '
                        f'{basesize_ratio_range[0]}.')
            elif self.input_size == 512:
                if basesize_ratio_range[0] == 0.1:  # SSD512 COCO
                    min_sizes.insert(0, int(self.input_size * 4 / 100))
                    max_sizes.insert(0, int(self.input_size * 10 / 100))
                elif basesize_ratio_range[0] == 0.15:  # SSD512 VOC
                    min_sizes.insert(0, int(self.input_size * 7 / 100))
                    max_sizes.insert(0, int(self.input_size * 15 / 100))
                else:
                    raise ValueError(
                        'When not setting min_sizes and max_sizes,'
                        'basesize_ratio_range[0] should be either 0.1'
                        'or 0.15 when input_size is 512, got'
                        f' {basesize_ratio_range[0]}.')
            else:
                raise ValueError(
                    'Only support 300 or 512 in SSDAnchorGenerator when '
                    'not setting min_sizes and max_sizes, '
                    f'got {self.input_size}.')

        assert len(min_sizes) == len(max_sizes) == len(strides)

        anchor_ratios = []
        anchor_scales = []
        for k in range(len(self.strides)):
            # Scale 1 plus the geometric mean of max/min size ratio, the
            # standard SSD scale pair per level.
            scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])]
            anchor_ratio = [1.]
            for r in ratios[k]:
                anchor_ratio += [1 / r, r]  # 4 or 6 ratio
            anchor_ratios.append(torch.Tensor(anchor_ratio))
            anchor_scales.append(torch.Tensor(scales))

        self.base_sizes = min_sizes
        self.scales = anchor_scales
        self.ratios = anchor_ratios
        self.scale_major = scale_major
        self.center_offset = 0
        self.base_anchors = self.gen_base_anchors()

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for i, base_size in enumerate(self.base_sizes):
            base_anchors = self.gen_single_level_base_anchors(
                base_size,
                scales=self.scales[i],
                ratios=self.ratios[i],
                center=self.centers[i])
            # Move the sqrt(max/min)-scale anchor to position 1 so the
            # anchor ordering matches the original SSD implementation.
            indices = list(range(len(self.ratios[i])))
            indices.insert(1, len(indices))
            base_anchors = torch.index_select(base_anchors, 0,
                                              torch.LongTensor(indices))
            multi_level_base_anchors.append(base_anchors)
        return multi_level_base_anchors

    def __repr__(self):
        """str: a string that describes the module"""
        # Fixes two defects of the previous implementation: `scales` was
        # printed twice, and `input_size`/`basesize_ratio_range` raised
        # AttributeError when min_sizes/max_sizes were passed explicitly
        # (those attributes are only set in the auto-generation branch of
        # __init__), so getattr with a None default is used instead.
        indent_str = '    '
        repr_str = self.__class__.__name__ + '(\n'
        repr_str += f'{indent_str}strides={self.strides},\n'
        repr_str += f'{indent_str}scales={self.scales},\n'
        repr_str += f'{indent_str}scale_major={self.scale_major},\n'
        repr_str += f'{indent_str}input_size='
        repr_str += f'{getattr(self, "input_size", None)},\n'
        repr_str += f'{indent_str}ratios={self.ratios},\n'
        repr_str += f'{indent_str}num_levels={self.num_levels},\n'
        repr_str += f'{indent_str}base_sizes={self.base_sizes},\n'
        repr_str += f'{indent_str}basesize_ratio_range='
        repr_str += f'{getattr(self, "basesize_ratio_range", None)})'
        return repr_str


@PRIOR_GENERATORS.register_module()
class LegacyAnchorGenerator(AnchorGenerator):
    """Legacy anchor generator used in MMDetection V1.x.

    Note:
        Difference to the V2.0 anchor generator:

        1. The center offset of V1.x anchors are set to be 0.5 rather than 0.
        2. The width/height are minused by 1 when calculating the anchors' \
            centers and corners to meet the V1.x coordinate system.
        3. The anchors' corners are quantized.

    Args:
        strides (list[int] | list[tuple[int]]): Strides of anchors
            in multiple feature levels.
        ratios (list[float]): The list of ratios between the height and width
            of anchors in a single level.
        scales (list[int] | None): Anchor scales for anchors in a single level.
            It cannot be set at the same time if `octave_base_scale` and
            `scales_per_octave` are set.
        base_sizes (list[int]): The basic sizes of anchors in multiple levels.
            If None is given, strides will be used to generate base_sizes.
        scale_major (bool): Whether to multiply scales first when generating
            base anchors. If true, the anchors in the same row will have the
            same scales. By default it is True in V2.0
        octave_base_scale (int): The base scale of octave.
        scales_per_octave (int): Number of scales for each octave.
            `octave_base_scale` and `scales_per_octave` are usually used in
            retinanet and the `scales` should be None when they are set.
        centers (list[tuple[float, float]] | None): The centers of the anchor
            relative to the feature grid center in multiple feature levels.
            By default it is set to be None and not used. If a list of float
            is given, this list will be used to shift the centers of anchors.
        center_offset (float): The offset of center in proportion to anchors'
            width and height. By default it is 0 in V2.0 but it should be 0.5
            in v1.x models.

    Examples:
        >>> from mmdet.core import LegacyAnchorGenerator
        >>> self = LegacyAnchorGenerator(
        >>>     [16], [1.], [1.], [9], center_offset=0.5)
        >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu')
        >>> print(all_anchors)
        [tensor([[ 0.,  0.,  8.,  8.],
                [16.,  0., 24.,  8.],
                [ 0., 16.,  8., 24.],
                [16., 16., 24., 24.]])]
    """

    def gen_single_level_base_anchors(self,
                                      base_size,
                                      scales,
                                      ratios,
                                      center=None):
        """Generate base anchors of a single level.

        Note:
            The width/height of anchors are minused by 1 when calculating \
                the centers and corners to meet the V1.x coordinate system.

        Args:
            base_size (int | float): Basic size of an anchor.
            scales (torch.Tensor): Scales of the anchor.
            ratios (torch.Tensor): The ratio between between the height.
                and width of anchors in a single level.
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature map.
        """
        if center is None:
            # V1.x offsets the centre inside a (size - 1) box.
            x_center = self.center_offset * (base_size - 1)
            y_center = self.center_offset * (base_size - 1)
        else:
            x_center, y_center = center

        h_ratios = torch.sqrt(ratios)
        w_ratios = 1 / h_ratios
        if self.scale_major:
            ws = (base_size * w_ratios[:, None] * scales[None, :]).view(-1)
            hs = (base_size * h_ratios[:, None] * scales[None, :]).view(-1)
        else:
            ws = (base_size * scales[:, None] * w_ratios[None, :]).view(-1)
            hs = (base_size * scales[:, None] * h_ratios[None, :]).view(-1)

        # Float anchors centred on the pixel centre; corners are rounded to
        # integer coordinates, matching the V1.x quantization.
        half_ws = 0.5 * (ws - 1)
        half_hs = 0.5 * (hs - 1)
        corners = [
            x_center - half_ws, y_center - half_hs,
            x_center + half_ws, y_center + half_hs
        ]
        return torch.stack(corners, dim=-1).round()


@PRIOR_GENERATORS.register_module()
class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
    """Legacy anchor generator used in MMDetection V1.x.

    The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator`
    can be found in `LegacyAnchorGenerator`.
    """

    def __init__(self,
                 strides,
                 ratios,
                 basesize_ratio_range,
                 input_size=300,
                 scale_major=True):
        super().__init__(
            strides=strides,
            ratios=ratios,
            basesize_ratio_range=basesize_ratio_range,
            input_size=input_size,
            scale_major=scale_major)
        # V1.x centres anchors on (stride - 1) / 2 instead of stride / 2,
        # so the base anchors computed by the parent must be regenerated.
        self.centers = [((s - 1) / 2., (s - 1) / 2.) for s in strides]
        self.base_anchors = self.gen_base_anchors()


@PRIOR_GENERATORS.register_module()
class YOLOAnchorGenerator(AnchorGenerator):
    """Anchor generator for YOLO.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels.
        base_sizes (list[list[tuple[int, int]]]): The basic sizes
            of anchors in multiple levels.
    """

    def __init__(self, strides, base_sizes):
        self.strides = [_pair(stride) for stride in strides]
        self.centers = [(w / 2., h / 2.) for w, h in self.strides]
        # Every level must declare the same number of anchor sizes.
        num_anchors_expected = len(base_sizes[0])
        self.base_sizes = []
        for sizes_per_level in base_sizes:
            assert num_anchors_expected == len(sizes_per_level)
            self.base_sizes.append(
                [_pair(size) for size in sizes_per_level])
        self.base_anchors = self.gen_base_anchors()

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        return len(self.base_sizes)

    def gen_base_anchors(self):
        """Generate base anchors.

        Returns:
            list(torch.Tensor): Base anchors of a feature grid in multiple \
                feature levels.
        """
        multi_level_base_anchors = []
        for lvl, sizes_per_level in enumerate(self.base_sizes):
            center = self.centers[lvl] if self.centers is not None else None
            multi_level_base_anchors.append(
                self.gen_single_level_base_anchors(sizes_per_level, center))
        return multi_level_base_anchors

    def gen_single_level_base_anchors(self, base_sizes_per_level, center=None):
        """Generate base anchors of a single level.

        Args:
            base_sizes_per_level (list[tuple[int, int]]): Basic sizes of
                anchors, each arranged as (w, h).
            center (tuple[float], optional): The center of the base anchor
                related to a single feature grid. Defaults to None.

        Returns:
            torch.Tensor: Anchors in a single-level feature maps.
        """
        x_center, y_center = center
        anchors = []
        for w, h in base_sizes_per_level:
            # Float anchor whose centre is aligned with the pixel centre.
            anchors.append(
                torch.Tensor([
                    x_center - 0.5 * w, y_center - 0.5 * h,
                    x_center + 0.5 * w, y_center + 0.5 * h
                ]))
        return torch.stack(anchors, dim=0)

    def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
        """Generate responsible anchor flags of grid cells in multiple scales.

        Args:
            featmap_sizes (list(tuple)): List of feature map sizes in multiple
                feature levels.
            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
            device (str): Device where the anchors will be put on.

        Return:
            list(torch.Tensor): responsible flags of anchors in multiple level
        """
        assert self.num_levels == len(featmap_sizes)
        return [
            self.single_level_responsible_flags(
                featmap_sizes[lvl],
                gt_bboxes,
                self.strides[lvl],
                self.num_base_anchors[lvl],
                device=device) for lvl in range(self.num_levels)
        ]

    def single_level_responsible_flags(self,
                                       featmap_size,
                                       gt_bboxes,
                                       stride,
                                       num_base_anchors,
                                       device='cuda'):
        """Generate the responsible flags of anchor in a single feature map.

        Args:
            featmap_size (tuple[int]): The size of feature maps.
            gt_bboxes (Tensor): Ground truth boxes, shape (n, 4).
            stride (tuple(int)): stride of current level
            num_base_anchors (int): The number of base anchors.
            device (str, optional): Device where the flags will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each anchor in a single level \
                feature map.
        """
        feat_h, feat_w = featmap_size
        # Grid cell that contains each ground-truth box centre.
        center_x = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device)
        center_y = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device)
        cell_x = torch.floor(center_x / stride[0]).long()
        cell_y = torch.floor(center_y / stride[1]).long()

        # Row-major flattened index of the responsible cell.
        cell_idx = cell_y * feat_w + cell_x

        flags = torch.zeros(
            feat_h * feat_w, dtype=torch.uint8, device=device)
        flags[cell_idx] = 1

        # Replicate the per-cell flag for every base anchor of that cell.
        return flags[:, None].expand(
            flags.size(0), num_base_anchors).contiguous().view(-1)


================================================
FILE: mmdet/core/anchor/builder.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

from mmcv.utils import Registry, build_from_cfg

# Registry holding every prior (anchor and point) generator class.
PRIOR_GENERATORS = Registry('Generator for anchors and points')

# Backward-compatible alias: anchor generators were renamed to prior
# generators, so both names refer to the same registry object.
ANCHOR_GENERATORS = PRIOR_GENERATORS


def build_prior_generator(cfg, default_args=None):
    """Build a prior (anchor or point) generator from its config.

    Args:
        cfg (dict): Config dict whose ``type`` key is registered in
            ``PRIOR_GENERATORS``.
        default_args (dict, optional): Default arguments merged into ``cfg``.

    Returns:
        The instantiated generator object.
    """
    return build_from_cfg(cfg, PRIOR_GENERATORS, default_args=default_args)


def build_anchor_generator(cfg, default_args=None):
    """Deprecated alias of :func:`build_prior_generator`."""
    warnings.warn(
        '``build_anchor_generator`` would be deprecated soon, please use '
        '``build_prior_generator`` ')
    return build_prior_generator(cfg, default_args)


================================================
FILE: mmdet/core/anchor/point_generator.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.nn.modules.utils import _pair

from .builder import PRIOR_GENERATORS


@PRIOR_GENERATORS.register_module()
class PointGenerator:
    """Single-level point generator producing (x, y, stride) grid points."""

    def _meshgrid(self, x, y, row_major=True):
        """Tile ``x`` and ``y`` into flattened grid coordinates."""
        xx = x.repeat(len(y))
        yy = y.view(-1, 1).repeat(1, len(x)).view(-1)
        return (xx, yy) if row_major else (yy, xx)

    def grid_points(self, featmap_size, stride=16, device='cuda'):
        """Generate (x, y, stride) points covering one feature map."""
        feat_h, feat_w = featmap_size
        xs = torch.arange(0., feat_w, device=device) * stride
        ys = torch.arange(0., feat_h, device=device) * stride
        grid_xx, grid_yy = self._meshgrid(xs, ys)
        # Broadcast the scalar stride into a per-point column.
        stride_col = xs.new_full((grid_xx.shape[0], ), stride)
        points = torch.stack([grid_xx, grid_yy, stride_col], dim=-1)
        return points.to(device)

    def valid_flags(self, featmap_size, valid_size, device='cuda'):
        """Flag the grid cells inside the valid (non-padded) region."""
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device)
        valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device)
        valid_x[:valid_w] = 1
        valid_y[:valid_h] = 1
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        return valid_xx & valid_yy


@PRIOR_GENERATORS.register_module()
class MlvlPointGenerator:
    """Standard points generator for multi-level (Mlvl) feature maps in 2D
    points-based detectors.

    Args:
        strides (list[int] | list[tuple[int, int]]): Strides of anchors
            in multiple feature levels in order (w, h).
        offset (float): The offset of points, the value is normalized with
            corresponding stride. Defaults to 0.5.
    """

    def __init__(self, strides, offset=0.5):
        # Normalize every stride into a (w, h) pair.
        self.strides = [_pair(s) for s in strides]
        self.offset = offset

    @property
    def num_levels(self):
        """int: number of feature levels that the generator will be applied"""
        # One stride pair is stored per feature level.
        return len(self.strides)

    @property
    def num_base_priors(self):
        """list[int]: The number of priors (points) at a point
        on the feature grid"""
        # Point generators place exactly one prior per grid location.
        return [1] * len(self.strides)

    def _meshgrid(self, x, y, row_major=True):
        """Return flattened grid coordinates built from ``x`` and ``y``."""
        yy, xx = torch.meshgrid(y, x)
        # .flatten() would cause an error in ONNX exporting,
        # so reshape is used instead.
        flat_xx = xx.reshape(-1)
        flat_yy = yy.reshape(-1)
        return (flat_xx, flat_yy) if row_major else (flat_yy, flat_xx)

    def grid_priors(self,
                    featmap_sizes,
                    dtype=torch.float32,
                    device='cuda',
                    with_stride=False):
        """Generate grid points of multiple feature levels.

        Args:
            featmap_sizes (list[tuple]): List of feature map sizes in
                multiple feature levels, each size arrange as
                as (h, w).
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str): The device where the anchors will be put on.
            with_stride (bool): Whether to concatenate the stride to
                the last dimension of points.

        Return:
            list[torch.Tensor]: Points of  multiple feature levels.
            The sizes of each tensor should be (N, 2) when with stride is
            ``False``, where N = width * height, width and height
            are the sizes of the corresponding feature level,
            and the last dimension 2 represent (coord_x, coord_y),
            otherwise the shape should be (N, 4),
            and the last dimension 4 represent
            (coord_x, coord_y, stride_w, stride_h).
        """
        assert self.num_levels == len(featmap_sizes)
        return [
            self.single_level_grid_priors(
                featmap_sizes[lvl],
                level_idx=lvl,
                dtype=dtype,
                device=device,
                with_stride=with_stride) for lvl in range(self.num_levels)
        ]

    def single_level_grid_priors(self,
                                 featmap_size,
                                 level_idx,
                                 dtype=torch.float32,
                                 device='cuda',
                                 with_stride=False):
        """Generate grid Points of a single level.

        Note:
            This function is usually called by method ``self.grid_priors``.

        Args:
            featmap_size (tuple[int]): Size of the feature maps, arrange as
                (h, w).
            level_idx (int): The index of corresponding feature map level.
            dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32.
            device (str, optional): The device the tensor will be put on.
                Defaults to 'cuda'.
            with_stride (bool): Concatenate the stride to the last dimension
                of points.

        Return:
            Tensor: Points of single feature levels.
            The shape of tensor should be (N, 2) when with stride is
            ``False``, where N = width * height, width and height
            are the sizes of the corresponding feature level,
            and the last dimension 2 represent (coord_x, coord_y),
            otherwise the shape should be (N, 4),
            and the last dimension 4 represent
            (coord_x, coord_y, stride_w, stride_h).
        """
        feat_h, feat_w = featmap_size
        stride_w, stride_h = self.strides[level_idx]
        # Build the ranges with the default dtype first, then cast to
        # `dtype`, so that the graph can be exported to ONNX correctly.
        shift_x = ((torch.arange(0, feat_w, device=device) + self.offset) *
                   stride_w).to(dtype)
        shift_y = ((torch.arange(0, feat_h, device=device) + self.offset) *
                   stride_h).to(dtype)
        shift_xx, shift_yy = self._meshgrid(shift_x, shift_y)
        if with_stride:
            # `shape[0]` instead of `len(...)` keeps ONNX export working.
            strides_w = shift_xx.new_full((shift_xx.shape[0], ),
                                          stride_w).to(dtype)
            strides_h = shift_xx.new_full((shift_yy.shape[0], ),
                                          stride_h).to(dtype)
            shifts = torch.stack([shift_xx, shift_yy, strides_w, strides_h],
                                 dim=-1)
        else:
            shifts = torch.stack([shift_xx, shift_yy], dim=-1)
        return shifts.to(device)

    def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
        """Generate valid flags of points for multiple feature levels.

        Args:
            featmap_sizes (list(tuple)): Sizes of the feature maps at each
                level, each arranged as (h, w).
            pad_shape (tuple(int)): Padded shape of the image, arranged as
                (h, w).
            device (str): Device where the flag tensors will be put on.

        Return:
            list(torch.Tensor): Valid flags of points of multiple levels.
        """
        assert len(featmap_sizes) == self.num_levels
        pad_h, pad_w = pad_shape[:2]
        multi_level_flags = []
        # Strides are stored as (stride_w, stride_h) per level.
        for (feat_h, feat_w), (stride_w, stride_h) in zip(
                featmap_sizes, self.strides):
            # Only the region covered by the (unpadded) image is valid;
            # clip to the feature map size for safety.
            valid_h = min(int(np.ceil(pad_h / stride_h)), feat_h)
            valid_w = min(int(np.ceil(pad_w / stride_w)), feat_w)
            flags = self.single_level_valid_flags((feat_h, feat_w),
                                                  (valid_h, valid_w),
                                                  device=device)
            multi_level_flags.append(flags)
        return multi_level_flags

    def single_level_valid_flags(self,
                                 featmap_size,
                                 valid_size,
                                 device='cuda'):
        """Generate the valid flags of points of a single feature map.

        Args:
            featmap_size (tuple[int]): Size of the feature map, arranged
                as (h, w).
            valid_size (tuple[int]): Valid size of the feature map,
                arranged as (h, w).
            device (str, optional): The device where the flags will be put on.
                Defaults to 'cuda'.

        Returns:
            torch.Tensor: The valid flags of each point in a single level \
                feature map.
        """
        feat_h, feat_w = featmap_size
        valid_h, valid_w = valid_size
        assert valid_h <= feat_h and valid_w <= feat_w
        # A coordinate is valid iff it falls inside the valid extent.
        valid_x = torch.arange(feat_w, device=device) < valid_w
        valid_y = torch.arange(feat_h, device=device) < valid_h
        valid_xx, valid_yy = self._meshgrid(valid_x, valid_y)
        # A point is valid only when both its x and y coordinates are valid.
        return valid_xx & valid_yy

    def sparse_priors(self,
                      prior_idxs,
                      featmap_size,
                      level_idx,
                      dtype=torch.float32,
                      device='cuda'):
        """Generate sparse points according to the ``prior_idxs``.

        Args:
            prior_idxs (Tensor): The index of corresponding anchors
                in the feature map.
            featmap_size (tuple[int]): Feature map size, unpacked here as
                (h, w).
            level_idx (int): The level index of corresponding feature
                map.
            dtype (obj:`torch.dtype`): Data type of points. Defaults to
                ``torch.float32``.
            device (obj:`torch.device`): The device where the points are
                located.
        Returns:
            Tensor: Anchors with shape (N, 2), where N equals the length of
            ``prior_idxs`` and the last dimension 2 represents
            (coord_x, coord_y).
        """
        feat_h, feat_w = featmap_size
        # Flattened index -> (row, col) on the feature map, then scale by
        # the per-level stride; strides are stored as (stride_w, stride_h).
        xs = (prior_idxs % feat_w + self.offset) * self.strides[level_idx][0]
        ys = ((prior_idxs // feat_w) % feat_h +
              self.offset) * self.strides[level_idx][1]
        points = torch.stack([xs, ys], 1).to(dtype)
        return points.to(device)


================================================
FILE: mmdet/core/anchor/utils.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch


def images_to_levels(target, num_levels):
    """Convert targets by image to targets by feature level.

    [target_img0, target_img1] -> [target_level0, target_level1, ...]
    """
    stacked = torch.stack(target, 0)
    level_targets = []
    begin = 0
    # Slice consecutive chunks along dim 1, one chunk per feature level.
    for count in num_levels:
        level_targets.append(stacked[:, begin:begin + count])
        begin += count
    return level_targets


def anchor_inside_flags(flat_anchors,
                        valid_flags,
                        img_shape,
                        allowed_border=0):
    """Check whether the anchors are inside the border.

    Args:
        flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4), arranged
            as (x1, y1, x2, y2).
        valid_flags (torch.Tensor): An existing valid flags of anchors.
        img_shape (tuple(int)): Shape of current image, arranged as (h, w).
        allowed_border (int, optional): The border to allow the valid anchor.
            Defaults to 0.

    Returns:
        torch.Tensor: Flags indicating whether the anchors are inside a \
            valid range.
    """
    # A negative border disables the check entirely.
    if allowed_border < 0:
        return valid_flags
    img_h, img_w = img_shape[:2]
    inside = ((flat_anchors[:, 0] >= -allowed_border)
              & (flat_anchors[:, 1] >= -allowed_border)
              & (flat_anchors[:, 2] < img_w + allowed_border)
              & (flat_anchors[:, 3] < img_h + allowed_border))
    return valid_flags & inside


def calc_region(bbox, ratio, featmap_size=None):
    """Calculate a proportional bbox region.

    The bbox center is fixed and the new h' and w' are h * ratio and
    w * ratio.

    Args:
        bbox (Tensor): Bboxes to calculate regions, shape (n, 4).
        ratio (float): Ratio of the output region.
        featmap_size (tuple): Feature map size (h, w) used for clipping the
            boundary.

    Returns:
        tuple: x1, y1, x2, y2
    """
    inv = 1 - ratio
    # Interpolate between the two opposite corners: shrinking towards the
    # center keeps the center fixed.
    x1 = torch.round(inv * bbox[0] + ratio * bbox[2]).long()
    y1 = torch.round(inv * bbox[1] + ratio * bbox[3]).long()
    x2 = torch.round(ratio * bbox[0] + inv * bbox[2]).long()
    y2 = torch.round(ratio * bbox[1] + inv * bbox[3]).long()
    if featmap_size is not None:
        max_h, max_w = featmap_size[0], featmap_size[1]
        x1 = x1.clamp(min=0, max=max_w)
        y1 = y1.clamp(min=0, max=max_h)
        x2 = x2.clamp(min=0, max=max_w)
        y2 = y2.clamp(min=0, max=max_h)
    return (x1, y1, x2, y2)


================================================
FILE: mmdet/core/bbox/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
                        MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
                    PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
                       InstanceBalancedPosSampler, IoUBalancedNegSampler,
                       OHEMSampler, PseudoSampler, RandomSampler,
                       SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
                         bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
                         bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
                         distance2bbox, find_inside_bboxes, roi2bbox)

__all__ = [
    'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
    'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
    'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
    'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
    'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
    'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
    'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
    'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes'
]


================================================
FILE: mmdet/core/bbox/assigners/__init__.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .ascend_assign_result import AscendAssignResult
from .ascend_max_iou_assigner import AscendMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .mask_hungarian_assigner import MaskHungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner

__all__ = [
    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
    'TaskAlignedAssigner', 'MaskHungarianAssigner', 'AscendAssignResult',
    'AscendMaxIoUAssigner'
]


================================================
FILE: mmdet/core/bbox/assigners/approx_max_iou_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .max_iou_assigner import MaxIoUAssigner


@BBOX_ASSIGNERS.register_module()
class ApproxMaxIoUAssigner(MaxIoUAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each group of proposals will be assigned an integer indicating the
    ground truth index. (semi-positive index: gt label (0-based),
    -1: background)

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the last step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow quality matches. This is
            usually allowed for RPN and single stage detectors, but not
            allowed in the second stage.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will
            assign on CPU device. Negative values mean not assign on CPU.
        iou_calculator (dict): Config of the IoU calculator to build.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.match_low_quality = match_low_quality
        self.gpu_assign_thr = gpu_assign_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               approxs,
               squares,
               approxs_per_octave,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to approxs.

        This method assigns a gt bbox to each group of approxs (bboxes);
        each group of approxs is represented by a base approx (bbox) and
        will be assigned with -1, or a semi-positive number.
        background_label (-1) means negative sample,
        semi-positive number is the index (0-based) of assigned gt.
        The assignment is done in the following steps; the order matters.

        1. assign every bbox to background_label (-1)
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to background
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            approxs (Tensor): Bounding boxes to be assigned,
                shape(approxs_per_octave*n, 4).
            squares (Tensor): Base Bounding boxes to be assigned,
                shape(n, 4).
            approxs_per_octave (int): number of approxs per octave
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_squares = squares.size(0)
        num_gts = gt_bboxes.size(0)

        # No predictions and/or no truth: return an empty assignment.
        if num_squares == 0 or num_gts == 0:
            empty_overlaps = approxs.new(num_gts, num_squares)
            return self.assign_wrt_overlaps(empty_overlaps, gt_labels)

        # Re-organize anchors as approxs_per_octave x num_squares so that
        # all approxs of one square are grouped along dim 0.
        approxs = torch.transpose(
            approxs.view(num_squares, approxs_per_octave, 4), 0,
            1).contiguous().view(-1, 4)
        # When there are many GTs, compute overlaps and assign on CPU.
        assign_on_cpu = (self.gpu_assign_thr > 0
                         and num_gts > self.gpu_assign_thr)
        if assign_on_cpu:
            device = approxs.device
            approxs = approxs.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()
        all_overlaps = self.iou_calculator(approxs, gt_bboxes)

        # Each square is represented by the best overlap among its approxs.
        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                        num_gts).max(dim=0)
        overlaps = overlaps.t()

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
            if self.ignore_wrt_candidates:
                iof = self.iou_calculator(
                    squares, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = iof.max(dim=1)
            else:
                iof = self.iou_calculator(
                    gt_bboxes_ignore, squares, mode='iof')
                ignore_max_overlaps, _ = iof.max(dim=0)
            # Squares overlapping an ignore region are excluded via -1.
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            # Move results back to the original device.
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result


================================================
FILE: mmdet/core/bbox/assigners/ascend_assign_result.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.utils import util_mixins


class AscendAssignResult(util_mixins.NiceRepr):
    """Stores batched (Ascend) assignments between predicted and truth boxes.

    Attributes:
        batch_num_gts (list[int]): Number of ground-truth boxes considered
            for each image.
        batch_pos_mask (IntTensor): Mask of positive samples in all images.
        batch_neg_mask (IntTensor): Mask of negative samples in all images.
        batch_max_overlaps (FloatTensor): Max overlaps between all bboxes
            and the ground-truth boxes.
        batch_anchor_gt_indes (None | LongTensor): Assigned ground-truth box
            index of every anchor.
        batch_anchor_gt_labels (None | LongTensor): Ground-truth labels of
            every anchor.
    """

    def __init__(self,
                 batch_num_gts,
                 batch_pos_mask,
                 batch_neg_mask,
                 batch_max_overlaps,
                 batch_anchor_gt_indes=None,
                 batch_anchor_gt_labels=None):
        self.batch_num_gts = batch_num_gts
        self.batch_pos_mask = batch_pos_mask
        self.batch_neg_mask = batch_neg_mask
        self.batch_max_overlaps = batch_max_overlaps
        self.batch_anchor_gt_indes = batch_anchor_gt_indes
        self.batch_anchor_gt_labels = batch_anchor_gt_labels
        # Hook for possible user-defined extra properties.
        self._extra_properties = {}


================================================
FILE: mmdet/core/bbox/assigners/ascend_max_iou_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ....utils import masked_fill
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .ascend_assign_result import AscendAssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class AscendMaxIoUAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposals will be assigned with `-1`, or a semi-positive integer
    indicating the ground truth index.

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
            `min_pos_iou` is set to avoid assigning bboxes that have extremely
            small iou with GT as positive samples. It brings about 0.3 mAP
            improvements in 1x schedule but does not affect the performance of
            3x schedule. More comparisons can be found in
            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This is
            usually allowed for RPN and single stage detectors, but not allowed
            in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will assign
            on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        # Build the configured IoU calculator once at construction time.
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               batch_bboxes,
               batch_gt_bboxes,
               batch_gt_bboxes_ignore=None,
               batch_gt_labels=None,
               batch_bboxes_ignore_mask=None,
               batch_num_gts=None):
        """Assign gt to bboxes.

        Args:
            batch_bboxes (Tensor): Bounding boxes to be assigned,
                shape(b, n, 4).
            batch_gt_bboxes (Tensor): Ground truth boxes,
                shape (b, k, 4).
            batch_gt_bboxes_ignore (Tensor, optional): Ground truth
                bboxes that are labelled as `ignored`,
                e.g., crowd boxes in COCO.
            batch_gt_labels (Tensor, optional): Label of gt_bboxes,
                shape (b, k, ).
            batch_bboxes_ignore_mask: (b, n)
            batch_num_gts:(b, )
        Returns:
            :obj:`AssignResult`: The assign result.
        """
        # (b, k, n): overlap of every gt with every bbox, per image.
        batch_overlaps = self.iou_calculator(batch_gt_bboxes, batch_bboxes)
        # Mark overlaps of ignored bboxes with -1 so they can never be
        # selected as positives or negatives.
        batch_overlaps = masked_fill(
            batch_overlaps,
            batch_bboxes_ignore_mask.unsqueeze(1).float(),
            -1,
            neg=True)
        if self.ignore_iof_thr > 0 and batch_gt_bboxes_ignore is not None:
            if self.ignore_wrt_candidates:
                # IoF of candidate bboxes w.r.t. the ignore regions.
                batch_ignore_overlaps = self.iou_calculator(
                    batch_bboxes, batch_gt_bboxes_ignore, mode='iof')
                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,
                                                    batch_bboxes_ignore_mask,
                                                    -1)
                batch_ignore_max_overlaps, _ = batch_ignore_overlaps.max(dim=2)
            else:
                # IoF of the ignore regions w.r.t. the candidate bboxes.
                batch_ignore_overlaps = self.iou_calculator(
                    batch_gt_bboxes_ignore, batch_bboxes, mode='iof')
                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,
                                                    batch_bboxes_ignore_mask,
                                                    -1)
                batch_ignore_max_overlaps, _ = \
                    batch_ignore_overlaps.max(dim=1)
            # Bboxes overlapping an ignore region too much are excluded.
            batch_ignore_mask = \
                batch_ignore_max_overlaps > self.ignore_iof_thr
            batch_overlaps = masked_fill(batch_overlaps, batch_ignore_mask, -1)
        batch_assign_result = self.batch_assign_wrt_overlaps(
            batch_overlaps, batch_gt_labels, batch_num_gts)
        return batch_assign_result

    def batch_assign_wrt_overlaps(self,
                                  batch_overlaps,
                                  batch_gt_labels=None,
                                  batch_num_gts=None):
        """Assign gts to bboxes w.r.t. the precomputed batched overlaps.

        Args:
            batch_overlaps (Tensor): Overlaps between gts and bboxes,
                shape (b, k, n).
            batch_gt_labels (Tensor, optional): Labels of gts per image,
                shape (b, k, ).
            batch_num_gts (Tensor | list[int], optional): Number of gts in
                each image, shape (b, ).

        Returns:
            :obj:`AscendAssignResult`: The batched assign result.
        """
        num_images, num_gts, num_bboxes = batch_overlaps.size()
        # Per bbox: best gt overlap and its gt index, shape (b, n).
        batch_max_overlaps, batch_argmax_overlaps = batch_overlaps.max(dim=1)
        if isinstance(self.neg_iou_thr, float):
            # Negative: best overlap is valid (>= 0) but below the threshold.
            batch_neg_mask = \
                ((batch_max_overlaps >= 0)
                 & (batch_max_overlaps < self.neg_iou_thr)).int()
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            batch_neg_mask = \
                ((batch_max_overlaps >= self.neg_iou_thr[0])
                 & (batch_max_overlaps < self.neg_iou_thr[1])).int()
        else:
            # Unsupported threshold type: no bbox is treated as negative.
            batch_neg_mask = torch.zeros(
                batch_max_overlaps.size(),
                dtype=torch.int,
                device=batch_max_overlaps.device)
        batch_pos_mask = (batch_max_overlaps >= self.pos_iou_thr).int()
        if self.match_low_quality:
            # Per gt: best bbox overlap and its bbox index, shape (b, k).
            batch_gt_max_overlaps, batch_gt_argmax_overlaps = \
                batch_overlaps.max(dim=2)
            batch_index_bool = (batch_gt_max_overlaps >= self.min_pos_iou) & \
                               (batch_gt_max_overlaps > 0)
            if self.gt_max_assign_all:
                # All bboxes tying the gt's best overlap become positives.
                pos_inds_low_quality = \
                    (batch_overlaps == batch_gt_max_overlaps.unsqueeze(2)) & \
                    batch_index_bool.unsqueeze(2)
                for i in range(num_gts):
                    pos_inds_low_quality_gt = pos_inds_low_quality[:, i, :]
                    batch_argmax_overlaps[pos_inds_low_quality_gt] = i
                    batch_pos_mask[pos_inds_low_quality_gt] = 1
            else:
                # Only the single best bbox of each gt becomes a positive.
                index_temp = torch.arange(
                    0, num_gts, device=batch_max_overlaps.device)
                for index_image in range(num_images):
                    gt_argmax_overlaps = batch_gt_argmax_overlaps[index_image]
                    index_bool = batch_index_bool[index_image]
                    pos_inds_low_quality = gt_argmax_overlaps[index_bool]
                    batch_argmax_overlaps[index_image][pos_inds_low_quality] \
                        = index_temp[index_bool]
                    batch_pos_mask[index_image][pos_inds_low_quality] = 1
        # A bbox flagged positive can no longer be negative.
        batch_neg_mask = batch_neg_mask * (1 - batch_pos_mask)
        if batch_gt_labels is not None:
            # Gather the assigned gt label for every anchor, per image.
            batch_anchor_gt_labels = torch.zeros((num_images, num_bboxes),
                                                 dtype=batch_gt_labels.dtype,
                                                 device=batch_gt_labels.device)
            for index_image in range(num_images):
                batch_anchor_gt_labels[index_image] = torch.index_select(
                    batch_gt_labels[index_image], 0,
                    batch_argmax_overlaps[index_image])
        else:
            batch_anchor_gt_labels = None
        return AscendAssignResult(batch_num_gts, batch_pos_mask,
                                  batch_neg_mask, batch_max_overlaps,
                                  batch_argmax_overlaps,
                                  batch_anchor_gt_labels)


================================================
FILE: mmdet/core/bbox/assigners/assign_result.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.utils import util_mixins


class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing this
            assignment

        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.

        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.

        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.LongTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
                      labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
                      labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        # Refuse keys that would shadow the built-in info entries.
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append('max_overlaps.shape='
                         f'{tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            num_classes (int): number of foreground classes. Defaults to 3.
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))

        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        # Bug fix: this previously read ``kwargs.get('p_use_label', 3)``, so
        # a user-supplied ``num_classes`` was silently ignored and the
        # ``p_use_label`` value doubled as the class count.
        num_classes = kwargs.get('num_classes', 3)

        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)

        if num_gts == 0:
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np

            # Create an overlap for each predicted box
            max_overlaps = torch.from_numpy(rng.rand(num_preds))

            # Construct gt_inds for each predicted box
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            # maximum number of assignments constraints
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))

            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()

            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True

            is_ignore = torch.from_numpy(
                rng.rand(num_preds) < p_ignore) & is_assigned

            gt_inds = torch.zeros(num_preds, dtype=torch.int64)

            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()

            # NOTE(review): the carefully shuffled assignment above is
            # overwritten here by uniformly random 1-based indices; kept
            # as-is to preserve the existing random sequence — confirm
            # whether the overwrite is intentional.
            gt_inds = torch.from_numpy(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0

            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(
                        # remind that we set FG labels to [0, num_class-1]
                        # since mmdet v2.0
                        # BG cat_id: num_class
                        rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None

        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        # Gts are self-assigned with 1-based indices and prepended.
        self_inds = torch.arange(
            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])

        # Gts overlap themselves perfectly (iou = 1).
        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])

        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])


================================================
FILE: mmdet/core/bbox/assigners/atss_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class ATSSAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposals will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    If ``alpha`` is not None, it means that the dynamic cost
    ATSSAssigner is adopted, which is currently only used in the DDOD.

    Args:
        topk (int): number of bbox selected in each level.
        alpha (float, optional): param of cost rate for each proposal only
            in DDOD. Default None.
        iou_calculator (dict): builder of IoU calculator.
            Default dict(type='BboxOverlaps2D').
        ignore_iof_thr (float): IoF threshold above which a bbox overlapping
            an ignored gt is itself ignored. A non-positive value (the
            default -1) disables the ignore mechanism.
    """

    def __init__(self,
                 topk,
                 alpha=None,
                 iou_calculator=dict(type='BboxOverlaps2D'),
                 ignore_iof_thr=-1):
        self.topk = topk
        self.alpha = alpha
        self.iou_calculator = build_iou_calculator(iou_calculator)
        self.ignore_iof_thr = ignore_iof_thr

    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
    def assign(self,
               bboxes,
               num_level_bboxes,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None,
               cls_scores=None,
               bbox_preds=None):
        """Assign gt to bboxes.

        The assignment is done in following steps

        1. compute iou between all bbox (bbox of all pyramid levels) and gt
        2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bbox whose center
           are closest to the gt center, so we total select k*l bbox as
           candidates for each gt
        4. get corresponding iou for the these candidates, and compute the
           mean and std, set mean + std as the iou threshold
        5. select these candidates whose iou are greater than or equal to
           the threshold as positive
        6. limit the positive sample's center in gt

        If ``alpha`` is not None, and ``cls_scores`` and `bbox_preds`
        are not None, the overlaps calculation in the first step
        will also include dynamic cost, which is currently only used in
        the DDOD.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            num_level_bboxes (List): num of bboxes in each level
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO. Default None.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            cls_scores (list[Tensor]): Classification scores for all scale
                levels, each is a 4D-tensor, the channels number is
                num_base_priors * num_classes. Default None.
            bbox_preds (list[Tensor]): Box energies / deltas for all scale
                levels, each is a 4D-tensor, the channels number is
                num_base_priors * 4. Default None.

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        INF = 100000000
        bboxes = bboxes[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        message = 'Invalid alpha parameter because cls_scores or ' \
                  'bbox_preds are None. If you want to use the ' \
                  'cost-based ATSSAssigner,  please set cls_scores, ' \
                  'bbox_preds and self.alpha at the same time. '

        if self.alpha is None:
            # ATSSAssigner
            overlaps = self.iou_calculator(bboxes, gt_bboxes)
            if cls_scores is not None or bbox_preds is not None:
                warnings.warn(message)
        else:
            # Dynamic cost ATSSAssigner in DDOD
            assert cls_scores is not None and bbox_preds is not None, message

            # compute cls cost for bbox and GT
            cls_cost = torch.sigmoid(cls_scores[:, gt_labels])

            # compute iou between all bbox and gt
            overlaps = self.iou_calculator(bbox_preds, gt_bboxes)

            # make sure that we are in element-wise multiplication
            assert cls_cost.shape == overlaps.shape

            # overlaps is actually a cost matrix
            overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha

        # assign 0 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             0,
                                             dtype=torch.long)

        if num_gt == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gt == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)

        # compute center distance between all bbox and gt
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        gt_points = torch.stack((gt_cx, gt_cy), dim=1)

        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)

        distances = (bboxes_points[:, None, :] -
                     gt_points[None, :, :]).pow(2).sum(-1).sqrt()

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
            ignore_overlaps = self.iou_calculator(
                bboxes, gt_bboxes_ignore, mode='iof')
            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
            # push ignored bboxes infinitely far so they are never candidates
            distances[ignore_idxs, :] = INF
            assigned_gt_inds[ignore_idxs] = -1

        # Selecting candidates based on the center distance
        candidate_idxs = []
        start_idx = 0
        for level, bboxes_per_level in enumerate(num_level_bboxes):
            # on each pyramid level, for each gt,
            # select k bbox whose center are closest to the gt center
            end_idx = start_idx + bboxes_per_level
            distances_per_level = distances[start_idx:end_idx, :]
            selectable_k = min(self.topk, bboxes_per_level)

            _, topk_idxs_per_level = distances_per_level.topk(
                selectable_k, dim=0, largest=False)
            candidate_idxs.append(topk_idxs_per_level + start_idx)
            start_idx = end_idx
        candidate_idxs = torch.cat(candidate_idxs, dim=0)

        # get corresponding iou for the these candidates, and compute the
        # mean and std, set mean + std as the iou threshold
        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
        overlaps_mean_per_gt = candidate_overlaps.mean(0)
        overlaps_std_per_gt = candidate_overlaps.std(0)
        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt

        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]

        # limit the positive sample's center in gt
        for gt_idx in range(num_gt):
            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
        ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        candidate_idxs = candidate_idxs.view(-1)

        # calculate the left, top, right, bottom distance between positive
        # bbox center and gt side
        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
        is_pos = is_pos & is_in_gts

        # if an anchor box is assigned to multiple gts,
        # the one with the highest IoU will be selected.
        overlaps_inf = torch.full_like(overlaps,
                                       -INF).t().contiguous().view(-1)
        index = candidate_idxs.view(-1)[is_pos.view(-1)]
        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
        overlaps_inf = overlaps_inf.view(num_gt, -1).t()

        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
        assigned_gt_inds[
            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        return AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)


================================================
FILE: mmdet/core/bbox/assigners/base_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Abstract interface for assigners that match boxes to ground truth.

    Subclasses must implement :meth:`assign`, which decides, for every
    proposal/anchor, whether it is a positive, negative or ignored sample.
    """

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign boxes to either a ground truth boxes or a negative boxes."""


================================================
FILE: mmdet/core/bbox/assigners/center_region_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


def scale_boxes(bboxes, scale):
    """Expand (or shrink) an array of boxes around their centers.

    Args:
        bboxes (Tensor): Shape (m, 4), boxes in (x1, y1, x2, y2) format.
        scale (float): The scale factor of bboxes

    Returns:
        (Tensor): Shape (m, 4). Scaled bboxes
    """
    assert bboxes.size(1) == 4
    # Centers stay fixed; half extents are multiplied by `scale`.
    cx = (bboxes[:, 0] + bboxes[:, 2]) * .5
    cy = (bboxes[:, 1] + bboxes[:, 3]) * .5
    half_w = (bboxes[:, 2] - bboxes[:, 0]) * .5 * scale
    half_h = (bboxes[:, 3] - bboxes[:, 1]) * .5 * scale

    # Fill into a zeros_like buffer so dtype/device follow the input.
    scaled = torch.zeros_like(bboxes)
    scaled[:, 0] = cx - half_w
    scaled[:, 1] = cy - half_h
    scaled[:, 2] = cx + half_w
    scaled[:, 3] = cy + half_h
    return scaled


def is_located_in(points, bboxes):
    """Are points located in bboxes.

    A point exactly on a box edge is NOT counted as inside (strict
    inequalities).

    Args:
      points (Tensor): Points, shape: (m, 2).
      bboxes (Tensor): Bounding boxes, shape: (n, 4).

    Return:
      Tensor: Flags indicating if points are located in bboxes, shape: (m, n).
    """
    assert points.size(1) == 2
    assert bboxes.size(1) == 4
    # (m, 1) point coordinates broadcast against (n,) box edges -> (m, n).
    px = points[:, 0, None]
    py = points[:, 1, None]
    inside_x = (px > bboxes[:, 0]) & (px < bboxes[:, 2])
    inside_y = (py > bboxes[:, 1]) & (py < bboxes[:, 3])
    return inside_x & inside_y


def bboxes_area(bboxes):
    """Compute the area of an array of bboxes.

    Args:
        bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)

    Returns:
        Tensor: Area of the bboxes. Shape: (m, )
    """
    assert bboxes.size(1) == 4
    # width * height, with no +1 offset (continuous coordinates).
    return (bboxes[:, 2] - bboxes[:, 0]) * (bboxes[:, 3] - bboxes[:, 1])


@BBOX_ASSIGNERS.register_module()
class CenterRegionAssigner(BaseAssigner):
    """Assign pixels at the center region of a bbox as positive.

    Each proposals will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.
    - -1: negative samples
    - semi-positive numbers: positive sample, index (0-based) of assigned gt

    Args:
        pos_scale (float): Scale factor of a gt defining its center (core)
          region; pixels inside it are labelled as positive.
        neg_scale (float): Scale factor of a gt defining its outer (shadow)
          region; pixels inside it but outside the core region are ignored.
        min_pos_iof (float): Minimum iof of a pixel with a gt to be
          labelled as positive. Default: 1e-2
        ignore_gt_scale (float): Threshold within which the pixels
          are ignored when the gt is labelled as shadowed. Default: 0.5
        foreground_dominate (bool): If True, the bbox will be assigned as
          positive when a gt's kernel region overlaps with another's shadowed
          (ignored) region, otherwise it is set as ignored. Default to False.
        iou_calculator (dict): Config of the IoU/IoF calculator.
          Default: dict(type='BboxOverlaps2D').
    """

    def __init__(self,
                 pos_scale,
                 neg_scale,
                 min_pos_iof=1e-2,
                 ignore_gt_scale=0.5,
                 foreground_dominate=False,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_scale = pos_scale
        self.neg_scale = neg_scale
        self.min_pos_iof = min_pos_iof
        self.ignore_gt_scale = ignore_gt_scale
        self.foreground_dominate = foreground_dominate
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def get_gt_priorities(self, gt_bboxes):
        """Get gt priorities according to their areas.

        Smaller gt has higher priority.

        Args:
            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).

        Returns:
            Tensor: The priority of gts so that gts with larger priority is \
              more likely to be assigned. Shape (k, )
        """
        gt_areas = bboxes_area(gt_bboxes)
        # Rank all gt bbox areas. Smaller objects has larger priority
        _, sort_idx = gt_areas.sort(descending=True)
        # argsort of the sort index converts it into each gt's rank, so the
        # smallest gt ends up with the largest priority value.
        sort_idx = sort_idx.argsort()
        return sort_idx

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt to bboxes.

        This method assigns gts to every bbox (proposal/anchor), each bbox \
        will be assigned with -1, or a semi-positive number. -1 means \
        negative sample, semi-positive number is the index (0-based) of \
        assigned gt.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
              labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (tensor, optional): Label of gt_bboxes, shape (num_gts,).

        Returns:
            :obj:`AssignResult`: The assigned result. Note that \
              shadowed_labels of shape (N, 2) is also added as an \
              `assign_result` attribute. `shadowed_labels` is a tensor \
              composed of N pairs of anchor_ind, class_label], where N \
              is the number of anchors that lie in the outer region of a \
              gt, anchor_ind is the shadowed anchor index and class_label \
              is the shadowed class label.

        Example:
            >>> self = CenterRegionAssigner(0.2, 0.2)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        # There are in total 5 steps in the pixel assignment
        # 1. Find core (the center region, say inner 0.2)
        #     and shadow (the relatively outer part, say inner 0.2-0.5)
        #     regions of every gt.
        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
        #      the image.
        #    3.1. For overlapping objects, the prior bboxes in gt_core is
        #           assigned with the object with smallest area
        # 4. Assign prior bboxes with class label according to its gt id.
        #    4.1. Assign -1 to prior bboxes lying in shadowed gts
        #    4.2. Assign positive prior boxes with the corresponding label
        # 5. Find pixels lying in the shadow of an object and assign them with
        #      background label, but set the loss weight of its corresponding
        #      gt to zero.
        assert bboxes.size(1) == 4, 'bboxes must have size of 4'
        # 1. Find core positive and shadow region of every gt
        gt_core = scale_boxes(gt_bboxes, self.pos_scale)
        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)

        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
        bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
        # The center points lie within the gt boxes
        is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
        # Only calculate bbox and gt_core IoF. This enables small prior bboxes
        #   to match large gts
        bbox_and_gt_core_overlaps = self.iou_calculator(
            bboxes, gt_core, mode='iof')
        # The center point of effective priors should be within the gt box
        is_bbox_in_gt_core = is_bbox_in_gt & (
            bbox_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)

        is_bbox_in_gt_shadow = (
            self.iou_calculator(bboxes, gt_shadow, mode='iof') >
            self.min_pos_iof)
        # Rule out center effective positive pixels
        is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)

        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
        if num_gts == 0 or num_bboxes == 0:
            # If no gts exist, assign all pixels to negative
            assigned_gt_ids = \
                is_bbox_in_gt_core.new_zeros((num_bboxes,),
                                             dtype=torch.long)
            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
        else:
            # Step 3: assign a one-hot gt id to each pixel, and smaller objects
            #    have high priority to assign the pixel.
            sort_idx = self.get_gt_priorities(gt_bboxes)
            assigned_gt_ids, pixels_in_gt_shadow = \
                self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
                                               is_bbox_in_gt_shadow,
                                               gt_priority=sort_idx)

        if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
            # Scale the ignored gt regions, then mark any prior whose center
            #   falls inside one of them as ignored (-1)
            gt_bboxes_ignore = scale_boxes(
                gt_bboxes_ignore, scale=self.ignore_gt_scale)
            is_bbox_in_ignored_gts = is_located_in(bbox_centers,
                                                   gt_bboxes_ignore)
            is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
            assigned_gt_ids[is_bbox_in_ignored_gts] = -1

        # 4. Assign prior bboxes with class label according to its gt id.
        assigned_labels = None
        shadowed_pixel_labels = None
        if gt_labels is not None:
            # Default assigned label is the background (-1)
            assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_ids > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[assigned_gt_ids[pos_inds]
                                                      - 1]
            # 5. Find pixels lying in the shadow of an object
            shadowed_pixel_labels = pixels_in_gt_shadow.clone()
            if pixels_in_gt_shadow.numel() > 0:
                pixel_idx, gt_idx =\
                    pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
                assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
                    'Some pixels are dually assigned to ignore and gt!'
                # Replace the (1-based) shadowed gt index with its class label
                shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
                override = (
                    assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
                if self.foreground_dominate:
                    # When a pixel is both positive and shadowed, set it as pos
                    shadowed_pixel_labels = shadowed_pixel_labels[~override]
                else:
                    # When a pixel is both pos and shadowed, set it as shadowed
                    assigned_labels[pixel_idx[override]] = -1
                    assigned_gt_ids[pixel_idx[override]] = 0

        assign_result = AssignResult(
            num_gts, assigned_gt_ids, None, labels=assigned_labels)
        # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
        assign_result.set_extra_property('shadowed_labels',
                                         shadowed_pixel_labels)
        return assign_result

    def assign_one_hot_gt_indices(self,
                                  is_bbox_in_gt_core,
                                  is_bbox_in_gt_shadow,
                                  gt_priority=None):
        """Assign only one gt index to each prior box.

        Gts with large gt_priority are more likely to be assigned.

        Args:
            is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox center
              is in the core area of a gt (e.g. 0-0.2).
              Shape: (num_prior, num_gt).
            is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
              center is in the shadowed area of a gt (e.g. 0.2-0.5).
              Shape: (num_prior, num_gt).
            gt_priority (Tensor): Priorities of gts. The gt with a higher
              priority is more likely to be assigned to the bbox when the bbox
              match with multiple gts. Shape: (num_gt, ).

        Returns:
            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).

                - assigned_gt_inds: The assigned gt index of each prior bbox \
                    (i.e. index from 1 to num_gts). Shape: (num_prior, ).
                - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
                    shape (num_ignore, 2) with first column being the \
                    shadowed prior bbox indices and the second column the \
                    shadowed gt indices (1-based).
        """
        num_bboxes, num_gts = is_bbox_in_gt_core.shape

        if gt_priority is None:
            # Default priority is simply the gt order
            gt_priority = torch.arange(
                num_gts, device=is_bbox_in_gt_core.device)
        assert gt_priority.size(0) == num_gts
        # The bigger gt_priority, the more preferable to be assigned
        # The assigned inds are by default 0 (background)
        assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
                                                        dtype=torch.long)
        # Shadowed bboxes are assigned to be background. But the corresponding
        #   label is ignored during loss calculation, which is done through
        #   shadowed_gt_inds
        shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
        if is_bbox_in_gt_core.sum() == 0:  # No gt match
            shadowed_gt_inds[:, 1] += 1  # 1-based. For consistency issue
            return assigned_gt_inds, shadowed_gt_inds

        # The priority of each prior box and gt pair. If one prior box is
        #  matched to multiple gts. Only the pair with the highest priority
        #  is saved
        pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
                                                    -1,
                                                    dtype=torch.long)

        # Each bbox could match with multiple gts.
        # The following codes deal with this situation
        # Matched  bboxes (to any gt). Shape: (num_pos_anchor, )
        inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
        # The matched gt index of each positive bbox. Length >= num_pos_anchor
        #   , since one bbox could match multiple gts
        matched_bbox_gt_inds = torch.nonzero(
            is_bbox_in_gt_core, as_tuple=False)[:, 1]
        # Assign priority to each bbox-gt pair.
        pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
        _, argmax_priority = pair_priority[inds_of_match].max(dim=1)
        assigned_gt_inds[inds_of_match] = argmax_priority + 1  # 1-based
        # Zero-out the assigned anchor box to filter the shadowed gt indices
        # NOTE: this temporarily mutates the input tensor in place; it is
        #   restored below so the caller's arguments stay intact.
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
        # Concat the shadowed indices due to overlapping with that out side of
        #   effective scale. shape: (total_num_ignore, 2)
        shadowed_gt_inds = torch.cat(
            (shadowed_gt_inds, torch.nonzero(
                is_bbox_in_gt_core, as_tuple=False)),
            dim=0)
        # `is_bbox_in_gt_core` should be changed back to keep arguments intact.
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
        # 1-based shadowed gt indices, to be consistent with `assigned_gt_inds`
        if shadowed_gt_inds.numel() > 0:
            shadowed_gt_inds[:, 1] += 1
        return assigned_gt_inds, shadowed_gt_inds


================================================
FILE: mmdet/core/bbox/assigners/grid_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposals will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        iou_calculator (dict): Config of the IoU calculator.
            Default: dict(type='BboxOverlaps2D').
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=None):
        """Assign gt to bboxes. The process is very much like the max iou
        assigner, except that positive samples are constrained within the cell
        that the gt boxes fell in.

        This method assign a gt bbox to every bbox (proposal/anchor), each bbox
        will be assigned with -1, 0, or a positive number. -1 means don't care,
        0 means negative sample, positive number is the index (1-based) of
        assigned gt.
        The assignment is done in following steps, the order matters.

        1. assign every bbox to -1
        2. assign proposals whose iou with all gts <= neg_iou_thr to 0
        3. for each bbox within a cell, if the iou with its nearest gt >
            pos_iou_thr and the center of that gt falls inside the cell,
            assign it to that bbox
        4. for each gt bbox, assign its nearest proposals within the cell the
            gt bbox falls in to itself.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            box_responsible_flags (Tensor): flag to indicate whether box is
                responsible for prediction, shape(n, )
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # compute iou between all gt and bboxes
        # NOTE: overlaps is (num_gts, num_bboxes), i.e. gt-major, so the
        #   per-anchor reductions below use dim=0 and per-gt ones use dim=1
        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # 2. assign negative: below
        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        # shape of max_overlaps == argmax_overlaps == num_bboxes
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps <= self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, (tuple, list)):
            # a (low, high] interval of IoUs is treated as negative
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
                             & (max_overlaps <= self.neg_iou_thr[1])] = 0

        # 3. assign positive: falls into responsible cell and above
        # positive IOU threshold, the order matters.
        # the prior condition of comparison is to filter out all
        # unrelated anchors, i.e. not box_responsible_flags
        overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.

        # calculate max_overlaps again, but this time we only consider IOUs
        # for anchors responsible for prediction
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        pos_inds = (max_overlaps >
                    self.pos_iou_thr) & box_responsible_flags.type(torch.bool)
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        # 4. assign positive to max overlapped anchors within responsible cell
        # this may override step 3 so every gt gets at least one anchor
        for i in range(num_gts):
            if gt_max_overlaps[i] > self.min_pos_iou:
                if self.gt_max_assign_all:
                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
                         box_responsible_flags.type(torch.bool)
                    assigned_gt_inds[max_iou_inds] = i + 1
                elif box_responsible_flags[gt_argmax_overlaps[i]]:
                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        # assign labels of positive anchors
        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]

        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)


================================================
FILE: mmdet/core/bbox/assigners/hungarian_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from scipy.optimize import linear_sum_assignment

from ..builder import BBOX_ASSIGNERS
from ..match_costs import build_match_cost
from ..transforms import bbox_cxcywh_to_xyxy
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict): Config of the classification matching cost.
            Default ``dict(type='ClassificationCost', weight=1.)``.
        reg_cost (dict): Config of the regression L1 matching cost.
            Default ``dict(type='BBoxL1Cost', weight=1.0)``.
        iou_cost (dict): Config of the regression iou matching cost. GIoU is
            used by default, as in official DETR.
            Default ``dict(type='IoUCost', iou_mode='giou', weight=1.0)``.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.),
                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.

        This method assign each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image. Must
                contain the key 'img_shape'.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Currently unused; kept for interface
                compatibility. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign -1 by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              -1,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
        img_h, img_w, _ = img_meta['img_shape']
        # Factor to convert between normalized (cxcywh) predictions and
        # absolute-pixel (xyxy) ground truth coordinates.
        factor = gt_bboxes.new_tensor([img_w, img_h, img_w,
                                       img_h]).unsqueeze(0)

        # 2. compute the weighted costs
        # classification and bboxcost.
        cls_cost = self.cls_cost(cls_pred, gt_labels)
        # regression L1 cost, computed in normalized coordinate space
        normalize_gt_bboxes = gt_bboxes / factor
        reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes)
        # regression iou cost, defaultly giou is used in official DETR.
        bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor
        iou_cost = self.iou_cost(bboxes, gt_bboxes)
        # weighted sum of above three costs
        cost = cls_cost + reg_cost + iou_cost

        # 3. do Hungarian matching on CPU using linear_sum_assignment
        cost = cost.detach().cpu()
        matched_row_inds, matched_col_inds = linear_sum_assignment(cost)
        matched_row_inds = torch.from_numpy(matched_row_inds).to(
            bbox_pred.device)
        matched_col_inds = torch.from_numpy(matched_col_inds).to(
            bbox_pred.device)

        # 4. assign backgrounds and foregrounds
        # assign all indices to backgrounds first
        assigned_gt_inds[:] = 0
        # assign foregrounds based on matching results
        assigned_gt_inds[matched_row_inds] = matched_col_inds + 1
        assigned_labels[matched_row_inds] = gt_labels[matched_col_inds]
        return AssignResult(
            num_gts, assigned_gt_inds, None, labels=assigned_labels)


================================================
FILE: mmdet/core/bbox/assigners/mask_hungarian_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from scipy.optimize import linear_sum_assignment

from mmdet.core.bbox.builder import BBOX_ASSIGNERS
from mmdet.core.bbox.match_costs.builder import build_match_cost
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class MaskHungarianAssigner(BaseAssigner):
    """One-to-one matcher between mask predictions and ground truth masks.

    An assignment is computed between the targets and the predictions by
    minimizing a weighted sum of three cost terms: classification cost,
    mask focal cost and mask dice cost. Since targets never include the
    no_object category, predictions usually outnumber targets; every
    prediction left unmatched after the Hungarian step becomes background.
    Each query therefore receives either `0` or a positive integer:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config.
        mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config.
        dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.0),
                 mask_cost=dict(
                     type='FocalLossCost', weight=1.0, binary_input=True),
                 dice_cost=dict(type='DiceCost', weight=1.0)):
        # Build the three matching-cost callables from their configs.
        self.cls_cost = build_match_cost(cls_cost)
        self.mask_cost = build_match_cost(mask_cost)
        self.dice_cost = build_match_cost(dice_cost)

    def assign(self,
               cls_pred,
               mask_pred,
               gt_labels,
               gt_mask,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Compute one-to-one matching based on the weighted costs.

        Args:
            cls_pred (Tensor | None): Class prediction in shape
                (num_query, cls_out_channels).
            mask_pred (Tensor): Mask prediction in shape (num_query, H, W).
            gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ).
            gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        # cls_pred can be None (e.g. K-Net), so the number of queries is
        # taken from mask_pred instead.
        num_gt = gt_labels.shape[0]
        num_query = mask_pred.shape[0]

        # Default every query to "don't care" (-1).
        assigned_gt_inds = mask_pred.new_full((num_query, ),
                                              -1,
                                              dtype=torch.long)
        assigned_labels = mask_pred.new_full((num_query, ),
                                             -1,
                                             dtype=torch.long)

        # Degenerate cases: nothing can be matched.
        if num_gt == 0 or num_query == 0:
            if num_gt == 0:
                # Without ground truth every query is background.
                assigned_gt_inds[:] = 0
            return AssignResult(
                num_gt, assigned_gt_inds, None, labels=assigned_labels)

        # Accumulate the weighted cost terms; a term with zero weight (or a
        # missing cls_pred) contributes nothing.
        cost = 0
        if cls_pred is not None and self.cls_cost.weight != 0:
            cost = cost + self.cls_cost(cls_pred, gt_labels)
        if self.mask_cost.weight != 0:
            # mask_pred: [num_query, h, w], gt_mask: [num_gt, h, w]
            # resulting cost matrix: [num_query, num_gt]
            cost = cost + self.mask_cost(mask_pred, gt_mask)
        if self.dice_cost.weight != 0:
            cost = cost + self.dice_cost(mask_pred, gt_mask)

        # Hungarian matching runs on CPU via scipy.
        row_inds, col_inds = linear_sum_assignment(cost.detach().cpu())
        row_inds = torch.from_numpy(row_inds).to(mask_pred.device)
        col_inds = torch.from_numpy(col_inds).to(mask_pred.device)

        # Everything is background first; matched queries then get their
        # 1-based gt index and the gt label.
        assigned_gt_inds[:] = 0
        assigned_gt_inds[row_inds] = col_inds + 1
        assigned_labels[row_inds] = gt_labels[col_inds]
        return AssignResult(
            num_gt, assigned_gt_inds, None, labels=assigned_labels)


================================================
FILE: mmdet/core/bbox/assigners/max_iou_assigner.py
================================================
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class MaxIoUAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposals will be assigned with `-1`, or a semi-positive integer
    indicating the ground truth index.

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each gt).
            `min_pos_iou` is set to avoid assigning bboxes that have extremely
            small iou with GT as positive samples. It brings about 0.3 mAP
            improvements in 1x schedule but does not affect the performance of
            3x schedule. More comparisons can be found in
            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This is
            usually allowed for RPN and single stage detectors, but not allowed
            in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will assign
            on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        # IoU thresholds driving positive/negative assignment.
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        # Behaviour switches for step 4 (gt-max matching) and low-quality
        # matches.
        self.gt_max_assign_all = gt_max_assign_all
        self.match_low_quality = match_low_quality
        # Options controlling how ignored gt boxes are handled.
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        # Above this gt count the assignment falls back to CPU.
        self.gpu_assign_thr = gpu_assign_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
        """Assign gt to bboxes.

        This method assign a gt bbox to every bbox (proposal/anchor), each bbox
        will be assigned with -1, or a semi-positive number. -1 means negative
        sample, semi-positive number is the index (0-based) of assigned gt.
        The assignment is done in following steps, the order matters.

        1. assign every bbox to the background
        2. assign proposals whose iou with all gts < neg_iou_thr to 0
        3. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        4. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.

        Example:
            >>> self = MaxIoUAssigner(0.5, 0.5)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        assign_on_cpu = (self.gpu_assign_thr > 0) and (
            gt_bboxes.shape[0] > self.gpu_assign_thr)
        # compute overlap and assign gt on CPU when number of GT is large
        # (the (k, n) overlap matrix can exhaust GPU memory otherwise)
        if assign_on_cpu:
            device = bboxes.device
            bboxes = bboxes.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()

        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        # Mark candidates that heavily overlap ignored gt boxes with -1 so
        # they are neither positive nor negative samples downstream.
        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
            if self.ignore_wrt_candidates:
                # IoF computed w.r.t. the candidate boxes
                ignore_overlaps = self.iou_calculator(
                    bboxes, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                # IoF computed w.r.t. the ignored gt boxes
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, bboxes, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            # Move the result back to the original device.
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result

    def assign_wrt_overlaps(self, overlaps, gt_labels=None):
        """Assign w.r.t. the overlaps of bboxes with gts.

        Args:
            overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes,
                shape(k, n).
            gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = overlaps.size(0), overlaps.size(1)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)
        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        # 2. assign negative: below
        # the negative inds are set to be 0
        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps < self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0])
                             & (max_overlaps < self.neg_iou_thr[1])] = 0

        # 3. assign positive: above positive IoU threshold
        pos_inds = max_overlaps >= self.pos_iou_thr
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        if self.match_low_quality:
            # Low-quality matching will overwri
Download .txt
gitextract_fypon3it/

├── .gitignore
├── LICENSE
├── README.md
├── app.py
├── mmdet/
│   ├── __init__.py
│   ├── apis/
│   │   ├── __init__.py
│   │   ├── inference.py
│   │   ├── test.py
│   │   └── train.py
│   ├── core/
│   │   ├── __init__.py
│   │   ├── anchor/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_generator.py
│   │   │   ├── builder.py
│   │   │   ├── point_generator.py
│   │   │   └── utils.py
│   │   ├── bbox/
│   │   │   ├── __init__.py
│   │   │   ├── assigners/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── approx_max_iou_assigner.py
│   │   │   │   ├── ascend_assign_result.py
│   │   │   │   ├── ascend_max_iou_assigner.py
│   │   │   │   ├── assign_result.py
│   │   │   │   ├── atss_assigner.py
│   │   │   │   ├── base_assigner.py
│   │   │   │   ├── center_region_assigner.py
│   │   │   │   ├── grid_assigner.py
│   │   │   │   ├── hungarian_assigner.py
│   │   │   │   ├── mask_hungarian_assigner.py
│   │   │   │   ├── max_iou_assigner.py
│   │   │   │   ├── point_assigner.py
│   │   │   │   ├── region_assigner.py
│   │   │   │   ├── sim_ota_assigner.py
│   │   │   │   ├── task_aligned_assigner.py
│   │   │   │   └── uniform_assigner.py
│   │   │   ├── builder.py
│   │   │   ├── coder/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_bbox_coder.py
│   │   │   │   ├── bucketing_bbox_coder.py
│   │   │   │   ├── delta_xywh_bbox_coder.py
│   │   │   │   ├── distance_point_bbox_coder.py
│   │   │   │   ├── legacy_delta_xywh_bbox_coder.py
│   │   │   │   ├── pseudo_bbox_coder.py
│   │   │   │   ├── tblr_bbox_coder.py
│   │   │   │   └── yolo_bbox_coder.py
│   │   │   ├── demodata.py
│   │   │   ├── iou_calculators/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── iou2d_calculator.py
│   │   │   ├── match_costs/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── builder.py
│   │   │   │   └── match_cost.py
│   │   │   ├── samplers/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_sampler.py
│   │   │   │   ├── combined_sampler.py
│   │   │   │   ├── instance_balanced_pos_sampler.py
│   │   │   │   ├── iou_balanced_neg_sampler.py
│   │   │   │   ├── mask_pseudo_sampler.py
│   │   │   │   ├── mask_sampling_result.py
│   │   │   │   ├── ohem_sampler.py
│   │   │   │   ├── pseudo_sampler.py
│   │   │   │   ├── random_sampler.py
│   │   │   │   ├── sampling_result.py
│   │   │   │   └── score_hlr_sampler.py
│   │   │   └── transforms.py
│   │   ├── data_structures/
│   │   │   ├── __init__.py
│   │   │   ├── general_data.py
│   │   │   └── instance_data.py
│   │   ├── evaluation/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_overlaps.py
│   │   │   ├── class_names.py
│   │   │   ├── eval_hooks.py
│   │   │   ├── mean_ap.py
│   │   │   ├── panoptic_utils.py
│   │   │   └── recall.py
│   │   ├── export/
│   │   │   ├── __init__.py
│   │   │   ├── model_wrappers.py
│   │   │   ├── onnx_helper.py
│   │   │   └── pytorch2onnx.py
│   │   ├── hook/
│   │   │   ├── __init__.py
│   │   │   ├── checkloss_hook.py
│   │   │   ├── ema.py
│   │   │   ├── memory_profiler_hook.py
│   │   │   ├── set_epoch_info_hook.py
│   │   │   ├── sync_norm_hook.py
│   │   │   ├── sync_random_size_hook.py
│   │   │   ├── wandblogger_hook.py
│   │   │   ├── yolox_lrupdater_hook.py
│   │   │   └── yolox_mode_switch_hook.py
│   │   ├── mask/
│   │   │   ├── __init__.py
│   │   │   ├── mask_target.py
│   │   │   ├── structures.py
│   │   │   └── utils.py
│   │   ├── optimizers/
│   │   │   ├── __init__.py
│   │   │   ├── builder.py
│   │   │   └── layer_decay_optimizer_constructor.py
│   │   ├── post_processing/
│   │   │   ├── __init__.py
│   │   │   ├── bbox_nms.py
│   │   │   ├── matrix_nms.py
│   │   │   └── merge_augs.py
│   │   ├── utils/
│   │   │   ├── __init__.py
│   │   │   ├── dist_utils.py
│   │   │   └── misc.py
│   │   └── visualization/
│   │       ├── __init__.py
│   │       ├── image.py
│   │       └── palette.py
│   ├── datasets/
│   │   ├── __init__.py
│   │   ├── api_wrappers/
│   │   │   ├── __init__.py
│   │   │   ├── coco_api.py
│   │   │   └── panoptic_evaluation.py
│   │   ├── builder.py
│   │   ├── cityscapes.py
│   │   ├── coco.py
│   │   ├── coco_occluded.py
│   │   ├── coco_panoptic.py
│   │   ├── custom.py
│   │   ├── dataset_wrappers.py
│   │   ├── deepfashion.py
│   │   ├── lvis.py
│   │   ├── objects365.py
│   │   ├── openimages.py
│   │   ├── pipelines/
│   │   │   ├── __init__.py
│   │   │   ├── auto_augment.py
│   │   │   ├── compose.py
│   │   │   ├── formating.py
│   │   │   ├── formatting.py
│   │   │   ├── instaboost.py
│   │   │   ├── loading.py
│   │   │   ├── test_time_aug.py
│   │   │   └── transforms.py
│   │   ├── samplers/
│   │   │   ├── __init__.py
│   │   │   ├── class_aware_sampler.py
│   │   │   ├── distributed_sampler.py
│   │   │   ├── group_sampler.py
│   │   │   └── infinite_sampler.py
│   │   ├── utils.py
│   │   ├── voc.py
│   │   ├── wider_face.py
│   │   └── xml_style.py
│   ├── models/
│   │   ├── __init__.py
│   │   ├── backbones/
│   │   │   ├── __init__.py
│   │   │   ├── csp_darknet.py
│   │   │   ├── darknet.py
│   │   │   ├── detectors_resnet.py
│   │   │   ├── detectors_resnext.py
│   │   │   ├── efficientnet.py
│   │   │   ├── hourglass.py
│   │   │   ├── hrnet.py
│   │   │   ├── mobilenet_v2.py
│   │   │   ├── pvt.py
│   │   │   ├── regnet.py
│   │   │   ├── res2net.py
│   │   │   ├── resnest.py
│   │   │   ├── resnet.py
│   │   │   ├── resnext.py
│   │   │   ├── ssd_vgg.py
│   │   │   ├── swin.py
│   │   │   └── trident_resnet.py
│   │   ├── builder.py
│   │   ├── dense_heads/
│   │   │   ├── __init__.py
│   │   │   ├── anchor_free_head.py
│   │   │   ├── anchor_head.py
│   │   │   ├── ascend_anchor_head.py
│   │   │   ├── ascend_retina_head.py
│   │   │   ├── ascend_ssd_head.py
│   │   │   ├── atss_head.py
│   │   │   ├── autoassign_head.py
│   │   │   ├── base_dense_head.py
│   │   │   ├── base_mask_head.py
│   │   │   ├── cascade_rpn_head.py
│   │   │   ├── centernet_head.py
│   │   │   ├── centripetal_head.py
│   │   │   ├── corner_head.py
│   │   │   ├── ddod_head.py
│   │   │   ├── deformable_detr_head.py
│   │   │   ├── dense_test_mixins.py
│   │   │   ├── detr_head.py
│   │   │   ├── embedding_rpn_head.py
│   │   │   ├── fcos_head.py
│   │   │   ├── fovea_head.py
│   │   │   ├── free_anchor_retina_head.py
│   │   │   ├── fsaf_head.py
│   │   │   ├── ga_retina_head.py
│   │   │   ├── ga_rpn_head.py
│   │   │   ├── gfl_head.py
│   │   │   ├── guided_anchor_head.py
│   │   │   ├── lad_head.py
│   │   │   ├── ld_head.py
│   │   │   ├── mask2former_head.py
│   │   │   ├── maskformer_head.py
│   │   │   ├── nasfcos_head.py
│   │   │   ├── paa_head.py
│   │   │   ├── pisa_retinanet_head.py
│   │   │   ├── pisa_ssd_head.py
│   │   │   ├── reppoints_head.py
│   │   │   ├── retina_head.py
│   │   │   ├── retina_sepbn_head.py
│   │   │   ├── rpn_head.py
│   │   │   ├── sabl_retina_head.py
│   │   │   ├── solo_head.py
│   │   │   ├── solov2_head.py
│   │   │   ├── ssd_head.py
│   │   │   ├── tood_head.py
│   │   │   ├── vfnet_head.py
│   │   │   ├── yolact_head.py
│   │   │   ├── yolo_head.py
│   │   │   ├── yolof_head.py
│   │   │   └── yolox_head.py
│   │   ├── detectors/
│   │   │   ├── __init__.py
│   │   │   ├── atss.py
│   │   │   ├── autoassign.py
│   │   │   ├── base.py
│   │   │   ├── cascade_rcnn.py
│   │   │   ├── centernet.py
│   │   │   ├── cornernet.py
│   │   │   ├── ddod.py
│   │   │   ├── deformable_detr.py
│   │   │   ├── detr.py
│   │   │   ├── fast_rcnn.py
│   │   │   ├── faster_rcnn.py
│   │   │   ├── fcos.py
│   │   │   ├── fovea.py
│   │   │   ├── fsaf.py
│   │   │   ├── gfl.py
│   │   │   ├── grid_rcnn.py
│   │   │   ├── htc.py
│   │   │   ├── kd_one_stage.py
│   │   │   ├── lad.py
│   │   │   ├── mask2former.py
│   │   │   ├── mask_rcnn.py
│   │   │   ├── mask_scoring_rcnn.py
│   │   │   ├── maskformer.py
│   │   │   ├── nasfcos.py
│   │   │   ├── paa.py
│   │   │   ├── panoptic_fpn.py
│   │   │   ├── panoptic_two_stage_segmentor.py
│   │   │   ├── point_rend.py
│   │   │   ├── queryinst.py
│   │   │   ├── reppoints_detector.py
│   │   │   ├── retinanet.py
│   │   │   ├── rpn.py
│   │   │   ├── scnet.py
│   │   │   ├── single_stage.py
│   │   │   ├── single_stage_instance_seg.py
│   │   │   ├── solo.py
│   │   │   ├── solov2.py
│   │   │   ├── sparse_rcnn.py
│   │   │   ├── tood.py
│   │   │   ├── trident_faster_rcnn.py
│   │   │   ├── two_stage.py
│   │   │   ├── vfnet.py
│   │   │   ├── yolact.py
│   │   │   ├── yolo.py
│   │   │   ├── yolof.py
│   │   │   └── yolox.py
│   │   ├── losses/
│   │   │   ├── __init__.py
│   │   │   ├── accuracy.py
│   │   │   ├── ae_loss.py
│   │   │   ├── balanced_l1_loss.py
│   │   │   ├── cross_entropy_loss.py
│   │   │   ├── dice_loss.py
│   │   │   ├── focal_loss.py
│   │   │   ├── gaussian_focal_loss.py
│   │   │   ├── gfocal_loss.py
│   │   │   ├── ghm_loss.py
│   │   │   ├── iou_loss.py
│   │   │   ├── kd_loss.py
│   │   │   ├── mse_loss.py
│   │   │   ├── pisa_loss.py
│   │   │   ├── seesaw_loss.py
│   │   │   ├── smooth_l1_loss.py
│   │   │   ├── utils.py
│   │   │   └── varifocal_loss.py
│   │   ├── necks/
│   │   │   ├── __init__.py
│   │   │   ├── bfp.py
│   │   │   ├── channel_mapper.py
│   │   │   ├── ct_resnet_neck.py
│   │   │   ├── dilated_encoder.py
│   │   │   ├── dyhead.py
│   │   │   ├── fpg.py
│   │   │   ├── fpn.py
│   │   │   ├── fpn_carafe.py
│   │   │   ├── hrfpn.py
│   │   │   ├── nas_fpn.py
│   │   │   ├── nasfcos_fpn.py
│   │   │   ├── pafpn.py
│   │   │   ├── rfp.py
│   │   │   ├── ssd_neck.py
│   │   │   ├── yolo_neck.py
│   │   │   └── yolox_pafpn.py
│   │   ├── plugins/
│   │   │   ├── __init__.py
│   │   │   ├── dropblock.py
│   │   │   ├── msdeformattn_pixel_decoder.py
│   │   │   └── pixel_decoder.py
│   │   ├── roi_heads/
│   │   │   ├── __init__.py
│   │   │   ├── base_roi_head.py
│   │   │   ├── bbox_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── bbox_head.py
│   │   │   │   ├── convfc_bbox_head.py
│   │   │   │   ├── dii_head.py
│   │   │   │   ├── double_bbox_head.py
│   │   │   │   ├── sabl_head.py
│   │   │   │   └── scnet_bbox_head.py
│   │   │   ├── cascade_roi_head.py
│   │   │   ├── double_roi_head.py
│   │   │   ├── dynamic_roi_head.py
│   │   │   ├── grid_roi_head.py
│   │   │   ├── htc_roi_head.py
│   │   │   ├── mask_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── coarse_mask_head.py
│   │   │   │   ├── dynamic_mask_head.py
│   │   │   │   ├── fcn_mask_head.py
│   │   │   │   ├── feature_relay_head.py
│   │   │   │   ├── fused_semantic_head.py
│   │   │   │   ├── global_context_head.py
│   │   │   │   ├── grid_head.py
│   │   │   │   ├── htc_mask_head.py
│   │   │   │   ├── mask_point_head.py
│   │   │   │   ├── maskiou_head.py
│   │   │   │   ├── scnet_mask_head.py
│   │   │   │   └── scnet_semantic_head.py
│   │   │   ├── mask_scoring_roi_head.py
│   │   │   ├── pisa_roi_head.py
│   │   │   ├── point_rend_roi_head.py
│   │   │   ├── roi_extractors/
│   │   │   │   ├── __init__.py
│   │   │   │   ├── base_roi_extractor.py
│   │   │   │   ├── generic_roi_extractor.py
│   │   │   │   └── single_level_roi_extractor.py
│   │   │   ├── scnet_roi_head.py
│   │   │   ├── shared_heads/
│   │   │   │   ├── __init__.py
│   │   │   │   └── res_layer.py
│   │   │   ├── sparse_roi_head.py
│   │   │   ├── standard_roi_head.py
│   │   │   ├── test_mixins.py
│   │   │   └── trident_roi_head.py
│   │   ├── seg_heads/
│   │   │   ├── __init__.py
│   │   │   ├── base_semantic_head.py
│   │   │   ├── panoptic_fpn_head.py
│   │   │   └── panoptic_fusion_heads/
│   │   │       ├── __init__.py
│   │   │       ├── base_panoptic_fusion_head.py
│   │   │       ├── heuristic_fusion_head.py
│   │   │       └── maskformer_fusion_head.py
│   │   └── utils/
│   │       ├── __init__.py
│   │       ├── brick_wrappers.py
│   │       ├── builder.py
│   │       ├── ckpt_convert.py
│   │       ├── conv_upsample.py
│   │       ├── csp_layer.py
│   │       ├── gaussian_target.py
│   │       ├── inverted_residual.py
│   │       ├── make_divisible.py
│   │       ├── misc.py
│   │       ├── normed_predictor.py
│   │       ├── panoptic_gt_processing.py
│   │       ├── point_sample.py
│   │       ├── positional_encoding.py
│   │       ├── res_layer.py
│   │       ├── se_layer.py
│   │       └── transformer.py
│   ├── utils/
│   │   ├── __init__.py
│   │   ├── ascend_util.py
│   │   ├── collect_env.py
│   │   ├── compat_config.py
│   │   ├── contextmanagers.py
│   │   ├── logger.py
│   │   ├── memory.py
│   │   ├── misc.py
│   │   ├── profiling.py
│   │   ├── replace_cfg_vals.py
│   │   ├── rfnext.py
│   │   ├── setup_env.py
│   │   ├── split_batch.py
│   │   ├── util_distribution.py
│   │   ├── util_mixins.py
│   │   └── util_random.py
│   └── version.py
├── projects/
│   ├── configs/
│   │   ├── _base_/
│   │   │   ├── datasets/
│   │   │   │   ├── coco_detection.py
│   │   │   │   ├── coco_instance.py
│   │   │   │   └── coco_panoptic.py
│   │   │   └── default_runtime.py
│   │   ├── focalnet_dino/
│   │   │   ├── focalnet-l-dino_sam-vit-b.py
│   │   │   ├── focalnet-l-dino_sam-vit-h.py
│   │   │   ├── focalnet-l-dino_sam-vit-h_best-in-multi_cascade.py
│   │   │   └── focalnet-l-dino_sam-vit-l.py
│   │   └── hdetr/
│   │       ├── r50-hdetr_sam-vit-b.py
│   │       ├── r50-hdetr_sam-vit-b_best-in-multi.py
│   │       ├── r50-hdetr_sam-vit-b_best-in-multi_cascade.py
│   │       ├── r50-hdetr_sam-vit-b_cascade.py
│   │       ├── r50-hdetr_sam-vit-l.py
│   │       ├── swin-l-hdetr_sam-vit-b.py
│   │       ├── swin-l-hdetr_sam-vit-h.py
│   │       ├── swin-l-hdetr_sam-vit-h_best-in-multi_cascade.py
│   │       ├── swin-l-hdetr_sam-vit-l.py
│   │       ├── swin-t-hdetr_sam-vit-b.py
│   │       └── swin-t-hdetr_sam-vit-l.py
│   └── instance_segment_anything/
│       ├── __init__.py
│       ├── models/
│       │   ├── det_wrapper_instance_sam.py
│       │   ├── det_wrapper_instance_sam_cascade.py
│       │   ├── focalnet_dino/
│       │   │   ├── focalnet_dino_wrapper.py
│       │   │   └── models/
│       │   │       ├── __init__.py
│       │   │       └── dino/
│       │   │           ├── __init__.py
│       │   │           ├── attention.py
│       │   │           ├── backbone.py
│       │   │           ├── convnext.py
│       │   │           ├── deformable_transformer.py
│       │   │           ├── dino.py
│       │   │           ├── dn_components.py
│       │   │           ├── focal.py
│       │   │           ├── matcher.py
│       │   │           ├── position_encoding.py
│       │   │           ├── segmentation.py
│       │   │           ├── swin_transformer.py
│       │   │           ├── transformer_deformable.py
│       │   │           ├── util/
│       │   │           │   ├── __init__.py
│       │   │           │   ├── box_loss.py
│       │   │           │   ├── box_ops.py
│       │   │           │   ├── coco_id2name.json
│       │   │           │   ├── get_param_dicts.py
│       │   │           │   ├── logger.py
│       │   │           │   ├── misc.py
│       │   │           │   ├── plot_utils.py
│       │   │           │   ├── slconfig.py
│       │   │           │   ├── slio.py
│       │   │           │   ├── static_data_path.py
│       │   │           │   ├── time_counter.py
│       │   │           │   ├── utils.py
│       │   │           │   ├── vis_utils.py
│       │   │           │   └── visualizer.py
│       │   │           └── utils.py
│       │   ├── hdetr/
│       │   │   ├── hdetr_wrapper.py
│       │   │   └── models/
│       │   │       ├── __init__.py
│       │   │       ├── backbone.py
│       │   │       ├── deformable_detr.py
│       │   │       ├── deformable_transformer.py
│       │   │       ├── matcher.py
│       │   │       ├── position_encoding.py
│       │   │       ├── segmentation.py
│       │   │       ├── swin_transformer.py
│       │   │       └── util/
│       │   │           ├── __init__.py
│       │   │           ├── box_ops.py
│       │   │           ├── misc.py
│       │   │           └── plot_utils.py
│       │   └── segment_anything/
│       │       ├── __init__.py
│       │       ├── automatic_mask_generator.py
│       │       ├── build_sam.py
│       │       ├── modeling/
│       │       │   ├── __init__.py
│       │       │   ├── common.py
│       │       │   ├── image_encoder.py
│       │       │   ├── mask_decoder.py
│       │       │   ├── prompt_encoder.py
│       │       │   ├── sam.py
│       │       │   └── transformer.py
│       │       ├── predictor.py
│       │       └── utils/
│       │           ├── __init__.py
│       │           ├── amg.py
│       │           ├── onnx.py
│       │           └── transforms.py
│       └── ops/
│           ├── functions/
│           │   ├── __init__.py
│           │   └── ms_deform_attn_func.py
│           ├── make.sh
│           ├── modules/
│           │   ├── __init__.py
│           │   └── ms_deform_attn.py
│           ├── setup.py
│           ├── src/
│           │   ├── cpu/
│           │   │   ├── ms_deform_attn_cpu.cpp
│           │   │   └── ms_deform_attn_cpu.h
│           │   ├── cuda/
│           │   │   ├── ms_deform_attn_cuda.cu
│           │   │   ├── ms_deform_attn_cuda.h
│           │   │   └── ms_deform_im2col_cuda.cuh
│           │   ├── ms_deform_attn.h
│           │   └── vision.cpp
│           └── test.py
├── requirements/
│   ├── albu.txt
│   ├── build.txt
│   ├── docs.txt
│   ├── mminstall.txt
│   ├── optional.txt
│   ├── readthedocs.txt
│   ├── runtime.txt
│   └── tests.txt
├── requirements.txt
├── setup.cfg
├── setup.py
└── tools/
    ├── convert_ckpt.py
    ├── dist_test.sh
    └── test.py
Download .txt
Showing preview only (251K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (3240 symbols across 393 files)

FILE: app.py
  function init_demo_detector (line 65) | def init_demo_detector(config, checkpoint=None, device='cuda:0', cfg_opt...
  function inference_demo_detector (line 112) | def inference_demo_detector(model, imgs):
  function inference (line 176) | def inference(img, config):
  function main (line 252) | def main():

FILE: mmdet/__init__.py
  function digit_version (line 7) | def digit_version(version_str):

FILE: mmdet/apis/inference.py
  function init_detector (line 18) | def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=...
  class LoadImage (line 66) | class LoadImage:
    method __call__ (line 72) | def __call__(self, results):
  function inference_detector (line 99) | def inference_detector(model, imgs):
  function async_inference_detector (line 165) | async def async_inference_detector(model, imgs):
  function show_result_pyplot (line 222) | def show_result_pyplot(model,

FILE: mmdet/apis/test.py
  function single_gpu_test (line 17) | def single_gpu_test(model,
  function multi_gpu_test (line 81) | def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
  function collect_results_cpu (line 136) | def collect_results_cpu(result_part, size, tmpdir=None):
  function collect_results_gpu (line 179) | def collect_results_gpu(result_part, size):

FILE: mmdet/apis/train.py
  function init_random_seed (line 19) | def init_random_seed(seed=None, device='cuda'):
  function set_random_seed (line 52) | def set_random_seed(seed, deterministic=False):
  function auto_scale_lr (line 71) | def auto_scale_lr(cfg, distributed, logger):
  function train_detector (line 117) | def train_detector(model,

FILE: mmdet/core/anchor/anchor_generator.py
  class AnchorGenerator (line 13) | class AnchorGenerator:
    method __init__ (line 61) | def __init__(self,
    method num_base_anchors (line 116) | def num_base_anchors(self):
    method num_base_priors (line 121) | def num_base_priors(self):
    method num_levels (line 127) | def num_levels(self):
    method gen_base_anchors (line 131) | def gen_base_anchors(self):
    method gen_single_level_base_anchors (line 151) | def gen_single_level_base_anchors(self,
    method _meshgrid (line 196) | def _meshgrid(self, x, y, row_major=True):
    method grid_priors (line 216) | def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):
    method single_level_grid_priors (line 241) | def single_level_grid_priors(self,
    method sparse_priors (line 283) | def sparse_priors(self,
    method grid_anchors (line 318) | def grid_anchors(self, featmap_sizes, device='cuda'):
    method single_level_grid_anchors (line 347) | def single_level_grid_anchors(self,
    method valid_flags (line 392) | def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
    method single_level_valid_flags (line 419) | def single_level_valid_flags(self,
    method __repr__ (line 451) | def __repr__(self):
  class SSDAnchorGenerator (line 471) | class SSDAnchorGenerator(AnchorGenerator):
    method __init__ (line 492) | def __init__(self,
    method gen_base_anchors (line 571) | def gen_base_anchors(self):
    method __repr__ (line 592) | def __repr__(self):
  class LegacyAnchorGenerator (line 610) | class LegacyAnchorGenerator(AnchorGenerator):
    method gen_single_level_base_anchors (line 658) | def gen_single_level_base_anchors(self,
  class LegacySSDAnchorGenerator (line 709) | class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
    method __init__ (line 716) | def __init__(self,
  class YOLOAnchorGenerator (line 734) | class YOLOAnchorGenerator(AnchorGenerator):
    method __init__ (line 744) | def __init__(self, strides, base_sizes):
    method num_levels (line 757) | def num_levels(self):
    method gen_base_anchors (line 761) | def gen_base_anchors(self):
    method gen_single_level_base_anchors (line 778) | def gen_single_level_base_anchors(self, base_sizes_per_level, center=N...
    method responsible_flags (line 806) | def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
    method single_level_responsible_flags (line 831) | def single_level_responsible_flags(self,

FILE: mmdet/core/anchor/builder.py
  function build_prior_generator (line 11) | def build_prior_generator(cfg, default_args=None):
  function build_anchor_generator (line 15) | def build_anchor_generator(cfg, default_args=None):

FILE: mmdet/core/anchor/point_generator.py
  class PointGenerator (line 10) | class PointGenerator:
    method _meshgrid (line 12) | def _meshgrid(self, x, y, row_major=True):
    method grid_points (line 20) | def grid_points(self, featmap_size, stride=16, device='cuda'):
    method valid_flags (line 30) | def valid_flags(self, featmap_size, valid_size, device='cuda'):
  class MlvlPointGenerator (line 44) | class MlvlPointGenerator:
    method __init__ (line 55) | def __init__(self, strides, offset=0.5):
    method num_levels (line 60) | def num_levels(self):
    method num_base_priors (line 65) | def num_base_priors(self):
    method _meshgrid (line 70) | def _meshgrid(self, x, y, row_major=True):
    method grid_priors (line 80) | def grid_priors(self,
    method single_level_grid_priors (line 119) | def single_level_grid_priors(self,
    method valid_flags (line 177) | def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
    method single_level_valid_flags (line 205) | def single_level_valid_flags(self,
    method sparse_priors (line 234) | def sparse_priors(self,

FILE: mmdet/core/anchor/utils.py
  function images_to_levels (line 5) | def images_to_levels(target, num_levels):
  function anchor_inside_flags (line 21) | def anchor_inside_flags(flat_anchors,
  function calc_region (line 50) | def calc_region(bbox, ratio, featmap_size=None):

FILE: mmdet/core/bbox/assigners/approx_max_iou_assigner.py
  class ApproxMaxIoUAssigner (line 10) | class ApproxMaxIoUAssigner(MaxIoUAssigner):
    method __init__ (line 40) | def __init__(self,
    method assign (line 60) | def assign(self,

FILE: mmdet/core/bbox/assigners/ascend_assign_result.py
  class AscendAssignResult (line 5) | class AscendAssignResult(util_mixins.NiceRepr):
    method __init__ (line 20) | def __init__(self,

FILE: mmdet/core/bbox/assigners/ascend_max_iou_assigner.py
  class AscendMaxIoUAssigner (line 12) | class AscendMaxIoUAssigner(BaseAssigner):
    method __init__ (line 47) | def __init__(self,
    method assign (line 67) | def assign(self,
    method batch_assign_wrt_overlaps (line 120) | def batch_assign_wrt_overlaps(self,

FILE: mmdet/core/bbox/assigners/assign_result.py
  class AssignResult (line 7) | class AssignResult(util_mixins.NiceRepr):
    method __init__ (line 43) | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
    method num_preds (line 52) | def num_preds(self):
    method set_extra_property (line 56) | def set_extra_property(self, key, value):
    method get_extra_property (line 61) | def get_extra_property(self, key):
    method info (line 66) | def info(self):
    method __nice__ (line 78) | def __nice__(self):
    method random (line 98) | def random(cls, **kwargs):
    method add_gt_ (line 192) | def add_gt_(self, gt_labels):

FILE: mmdet/core/bbox/assigners/atss_assigner.py
  class ATSSAssigner (line 13) | class ATSSAssigner(BaseAssigner):
    method __init__ (line 29) | def __init__(self,
    method assign (line 52) | def assign(self,

FILE: mmdet/core/bbox/assigners/base_assigner.py
  class BaseAssigner (line 5) | class BaseAssigner(metaclass=ABCMeta):
    method assign (line 9) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...

FILE: mmdet/core/bbox/assigners/center_region_assigner.py
  function scale_boxes (line 10) | def scale_boxes(bboxes, scale):
  function is_located_in (line 37) | def is_located_in(points, bboxes):
  function bboxes_area (line 55) | def bboxes_area(bboxes):
  class CenterRegionAssigner (line 72) | class CenterRegionAssigner(BaseAssigner):
    method __init__ (line 94) | def __init__(self,
    method get_gt_priorities (line 108) | def get_gt_priorities(self, gt_bboxes):
    method assign (line 126) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
    method assign_one_hot_gt_indices (line 257) | def assign_one_hot_gt_indices(self,

FILE: mmdet/core/bbox/assigners/grid_assigner.py
  class GridAssigner (line 11) | class GridAssigner(BaseAssigner):
    method __init__ (line 31) | def __init__(self,
    method assign (line 43) | def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=N...

FILE: mmdet/core/bbox/assigners/hungarian_assigner.py
  class HungarianAssigner (line 13) | class HungarianAssigner(BaseAssigner):
    method __init__ (line 41) | def __init__(self,
    method assign (line 49) | def assign(self,

FILE: mmdet/core/bbox/assigners/mask_hungarian_assigner.py
  class MaskHungarianAssigner (line 12) | class MaskHungarianAssigner(BaseAssigner):
    method __init__ (line 33) | def __init__(self,
    method assign (line 42) | def assign(self,

FILE: mmdet/core/bbox/assigners/max_iou_assigner.py
  class MaxIoUAssigner (line 11) | class MaxIoUAssigner(BaseAssigner):
    method __init__ (line 46) | def __init__(self,
    method assign (line 66) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
    method assign_wrt_overlaps (line 133) | def assign_wrt_overlaps(self, overlaps, gt_labels=None):

FILE: mmdet/core/bbox/assigners/point_assigner.py
  class PointAssigner (line 10) | class PointAssigner(BaseAssigner):
    method __init__ (line 20) | def __init__(self, scale=4, pos_num=3):
    method assign (line 24) | def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...

FILE: mmdet/core/bbox/assigners/region_assigner.py
  function calc_region (line 10) | def calc_region(bbox, ratio, stride, featmap_size=None):
  function anchor_ctr_inside_region_flags (line 27) | def anchor_ctr_inside_region_flags(anchors, stride, region):
  class RegionAssigner (line 38) | class RegionAssigner(BaseAssigner):
    method __init__ (line 54) | def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
    method assign (line 58) | def assign(self,

FILE: mmdet/core/bbox/assigners/sim_ota_assigner.py
  class SimOTAAssigner (line 14) | class SimOTAAssigner(BaseAssigner):
    method __init__ (line 28) | def __init__(self,
    method assign (line 38) | def assign(self,
    method _assign (line 95) | def _assign(self,
    method get_in_gt_and_in_center_info (line 186) | def get_in_gt_and_in_center_info(self, priors, gt_bboxes):
    method dynamic_k_matching (line 230) | def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):

FILE: mmdet/core/bbox/assigners/task_aligned_assigner.py
  class TaskAlignedAssigner (line 13) | class TaskAlignedAssigner(BaseAssigner):
    method __init__ (line 31) | def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):
    method assign (line 36) | def assign(self,

FILE: mmdet/core/bbox/assigners/uniform_assigner.py
  class UniformAssigner (line 12) | class UniformAssigner(BaseAssigner):
    method __init__ (line 25) | def __init__(self,
    method assign (line 35) | def assign(self,

FILE: mmdet/core/bbox/builder.py
  function build_assigner (line 9) | def build_assigner(cfg, **default_args):
  function build_sampler (line 14) | def build_sampler(cfg, **default_args):
  function build_bbox_coder (line 19) | def build_bbox_coder(cfg, **default_args):

FILE: mmdet/core/bbox/coder/base_bbox_coder.py
  class BaseBBoxCoder (line 5) | class BaseBBoxCoder(metaclass=ABCMeta):
    method __init__ (line 8) | def __init__(self, **kwargs):
    method encode (line 12) | def encode(self, bboxes, gt_bboxes):
    method decode (line 16) | def decode(self, bboxes, bboxes_pred):

FILE: mmdet/core/bbox/coder/bucketing_bbox_coder.py
  class BucketingBBoxCoder (line 13) | class BucketingBBoxCoder(BaseBBoxCoder):
    method __init__ (line 35) | def __init__(self,
    method encode (line 50) | def encode(self, bboxes, gt_bboxes):
    method decode (line 72) | def decode(self, bboxes, pred_bboxes, max_shape=None):
  function generat_buckets (line 96) | def generat_buckets(proposals, num_buckets, scale_factor=1.0):
  function bbox2bucket (line 145) | def bbox2bucket(proposals,
  function bucket2bbox (line 269) | def bucket2bbox(proposals,

FILE: mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
  class DeltaXYWHBBoxCoder (line 13) | class DeltaXYWHBBoxCoder(BaseBBoxCoder):
    method __init__ (line 34) | def __init__(self,
    method encode (line 47) | def encode(self, bboxes, gt_bboxes):
    method decode (line 65) | def decode(self,
  function bbox2delta (line 118) | def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., ...
  function delta2bbox (line 164) | def delta2bbox(rois,
  function onnx_delta2bbox (line 263) | def onnx_delta2bbox(rois,

FILE: mmdet/core/bbox/coder/distance_point_bbox_coder.py
  class DistancePointBBoxCoder (line 8) | class DistancePointBBoxCoder(BaseBBoxCoder):
    method __init__ (line 19) | def __init__(self, clip_border=True):
    method encode (line 23) | def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
    method decode (line 41) | def decode(self, points, pred_bboxes, max_shape=None):

FILE: mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
  class LegacyDeltaXYWHBBoxCoder (line 11) | class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
    method __init__ (line 34) | def __init__(self,
    method encode (line 41) | def encode(self, bboxes, gt_bboxes):
    method decode (line 59) | def decode(self,
  function legacy_bbox2delta (line 85) | def legacy_bbox2delta(proposals,
  function legacy_delta2bbox (line 134) | def legacy_delta2bbox(rois,

FILE: mmdet/core/bbox/coder/pseudo_bbox_coder.py
  class PseudoBBoxCoder (line 7) | class PseudoBBoxCoder(BaseBBoxCoder):
    method __init__ (line 10) | def __init__(self, **kwargs):
    method encode (line 13) | def encode(self, bboxes, gt_bboxes):
    method decode (line 17) | def decode(self, bboxes, pred_bboxes):

FILE: mmdet/core/bbox/coder/tblr_bbox_coder.py
  class TBLRBBoxCoder (line 10) | class TBLRBBoxCoder(BaseBBoxCoder):
    method __init__ (line 26) | def __init__(self, normalizer=4.0, clip_border=True):
    method encode (line 31) | def encode(self, bboxes, gt_bboxes):
    method decode (line 50) | def decode(self, bboxes, pred_bboxes, max_shape=None):
  function bboxes2tblr (line 77) | def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
  function tblr2bboxes (line 124) | def tblr2bboxes(priors,

FILE: mmdet/core/bbox/coder/yolo_bbox_coder.py
  class YOLOBBoxCoder (line 10) | class YOLOBBoxCoder(BaseBBoxCoder):
    method __init__ (line 22) | def __init__(self, eps=1e-6):
    method encode (line 27) | def encode(self, bboxes, gt_bboxes, stride):
    method decode (line 62) | def decode(self, bboxes, pred_bboxes, stride):

FILE: mmdet/core/bbox/demodata.py
  function random_boxes (line 8) | def random_boxes(num=1, scale=1, rng=None):

FILE: mmdet/core/bbox/iou_calculators/builder.py
  function build_iou_calculator (line 7) | def build_iou_calculator(cfg, default_args=None):

FILE: mmdet/core/bbox/iou_calculators/iou2d_calculator.py
  function cast_tensor_type (line 7) | def cast_tensor_type(x, scale=1., dtype=None):
  function fp16_clamp (line 14) | def fp16_clamp(x, min=None, max=None):
  class BboxOverlaps2D (line 23) | class BboxOverlaps2D:
    method __init__ (line 26) | def __init__(self, scale=1., dtype=None):
    method __call__ (line 30) | def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
    method __repr__ (line 67) | def __repr__(self):
  function bbox_overlaps (line 74) | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e...

FILE: mmdet/core/bbox/match_costs/builder.py
  function build_match_cost (line 7) | def build_match_cost(cfg, default_args=None):

FILE: mmdet/core/bbox/match_costs/match_cost.py
  class BBoxL1Cost (line 11) | class BBoxL1Cost:
    method __init__ (line 29) | def __init__(self, weight=1., box_format='xyxy'):
    method __call__ (line 34) | def __call__(self, bbox_pred, gt_bboxes):
  class FocalLossCost (line 55) | class FocalLossCost:
    method __init__ (line 80) | def __init__(self,
    method _focal_loss_cost (line 92) | def _focal_loss_cost(self, cls_pred, gt_labels):
    method _mask_focal_loss_cost (line 111) | def _mask_focal_loss_cost(self, cls_pred, gt_labels):
    method __call__ (line 136) | def __call__(self, cls_pred, gt_labels):
  class ClassificationCost (line 153) | class ClassificationCost:
    method __init__ (line 174) | def __init__(self, weight=1.):
    method __call__ (line 177) | def __call__(self, cls_pred, gt_labels):
  class IoUCost (line 197) | class IoUCost:
    method __init__ (line 215) | def __init__(self, iou_mode='giou', weight=1.):
    method __call__ (line 219) | def __call__(self, bboxes, gt_bboxes):
  class DiceCost (line 239) | class DiceCost:
    method __init__ (line 254) | def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True):
    method binary_mask_dice_loss (line 260) | def binary_mask_dice_loss(self, mask_preds, gt_masks):
    method __call__ (line 283) | def __call__(self, mask_preds, gt_masks):
  class CrossEntropyLossCost (line 299) | class CrossEntropyLossCost:
    method __init__ (line 315) | def __init__(self, weight=1., use_sigmoid=True):
    method _binary_cross_entropy (line 320) | def _binary_cross_entropy(self, cls_pred, gt_labels):
    method __call__ (line 344) | def __call__(self, cls_pred, gt_labels):

FILE: mmdet/core/bbox/samplers/base_sampler.py
  class BaseSampler (line 9) | class BaseSampler(metaclass=ABCMeta):
    method __init__ (line 12) | def __init__(self,
    method _sample_pos (line 26) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 31) | def _sample_neg(self, assign_result, num_expected, **kwargs):
    method sample (line 35) | def sample(self,

FILE: mmdet/core/bbox/samplers/combined_sampler.py
  class CombinedSampler (line 7) | class CombinedSampler(BaseSampler):
    method __init__ (line 10) | def __init__(self, pos_sampler, neg_sampler, **kwargs):
    method _sample_pos (line 15) | def _sample_pos(self, **kwargs):
    method _sample_neg (line 19) | def _sample_neg(self, **kwargs):

FILE: mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
  class InstanceBalancedPosSampler (line 10) | class InstanceBalancedPosSampler(RandomSampler):
    method _sample_pos (line 14) | def _sample_pos(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
  class IoUBalancedNegSampler (line 10) | class IoUBalancedNegSampler(RandomSampler):
    method __init__ (line 30) | def __init__(self,
    method sample_via_interval (line 47) | def sample_via_interval(self, max_overlaps, full_set, num_expected):
    method _sample_neg (line 89) | def _sample_neg(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/mask_pseudo_sampler.py
  class MaskPseudoSampler (line 13) | class MaskPseudoSampler(BaseSampler):
    method __init__ (line 16) | def __init__(self, **kwargs):
    method _sample_pos (line 19) | def _sample_pos(self, **kwargs):
    method _sample_neg (line 23) | def _sample_neg(self, **kwargs):
    method sample (line 27) | def sample(self, assign_result, masks, gt_masks, **kwargs):

FILE: mmdet/core/bbox/samplers/mask_sampling_result.py
  class MaskSamplingResult (line 10) | class MaskSamplingResult(SamplingResult):
    method __init__ (line 13) | def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result,
    method masks (line 37) | def masks(self):
    method __nice__ (line 41) | def __nice__(self):
    method info (line 50) | def info(self):

FILE: mmdet/core/bbox/samplers/ohem_sampler.py
  class OHEMSampler (line 10) | class OHEMSampler(BaseSampler):
    method __init__ (line 16) | def __init__(self,
    method hard_mining (line 34) | def hard_mining(self, inds, num_expected, bboxes, labels, feats):
    method _sample_pos (line 55) | def _sample_pos(self,
    method _sample_neg (line 83) | def _sample_neg(self,

FILE: mmdet/core/bbox/samplers/pseudo_sampler.py
  class PseudoSampler (line 10) | class PseudoSampler(BaseSampler):
    method __init__ (line 13) | def __init__(self, **kwargs):
    method _sample_pos (line 16) | def _sample_pos(self, **kwargs):
    method _sample_neg (line 20) | def _sample_neg(self, **kwargs):
    method sample (line 24) | def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):

FILE: mmdet/core/bbox/samplers/random_sampler.py
  class RandomSampler (line 9) | class RandomSampler(BaseSampler):
    method __init__ (line 21) | def __init__(self,
    method random_choice (line 32) | def random_choice(self, gallery, num):
    method _sample_pos (line 64) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 74) | def _sample_neg(self, assign_result, num_expected, **kwargs):

FILE: mmdet/core/bbox/samplers/sampling_result.py
  class SamplingResult (line 7) | class SamplingResult(util_mixins.NiceRepr):
    method __init__ (line 26) | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
    method bboxes (line 53) | def bboxes(self):
    method to (line 57) | def to(self, device):
    method __nice__ (line 72) | def __nice__(self):
    method info (line 81) | def info(self):
    method random (line 94) | def random(cls, rng=None, **kwargs):

FILE: mmdet/core/bbox/samplers/score_hlr_sampler.py
  class ScoreHLRSampler (line 12) | class ScoreHLRSampler(BaseSampler):
    method __init__ (line 33) | def __init__(self,
    method random_choice (line 63) | def random_choice(gallery, num):
    method _sample_pos (line 92) | def _sample_pos(self, assign_result, num_expected, **kwargs):
    method _sample_neg (line 100) | def _sample_neg(self,
    method sample (line 216) | def sample(self,

FILE: mmdet/core/bbox/transforms.py
  function find_inside_bboxes (line 6) | def find_inside_bboxes(bboxes, img_h, img_w):
  function bbox_flip (line 22) | def bbox_flip(bboxes, img_shape, direction='horizontal'):
  function bbox_mapping (line 51) | def bbox_mapping(bboxes,
  function bbox_mapping_back (line 63) | def bbox_mapping_back(bboxes,
  function bbox2roi (line 75) | def bbox2roi(bbox_list):
  function roi2bbox (line 97) | def roi2bbox(rois):
  function bbox2result (line 116) | def bbox2result(bboxes, labels, num_classes):
  function distance2bbox (line 136) | def distance2bbox(points, distance, max_shape=None):
  function bbox2distance (line 189) | def bbox2distance(points, bbox, max_dis=None, eps=0.1):
  function bbox_rescale (line 213) | def bbox_rescale(bboxes, scale_factor=1.0):
  function bbox_cxcywh_to_xyxy (line 245) | def bbox_cxcywh_to_xyxy(bbox):
  function bbox_xyxy_to_cxcywh (line 259) | def bbox_xyxy_to_cxcywh(bbox):

FILE: mmdet/core/data_structures/general_data.py
  class GeneralData (line 10) | class GeneralData(NiceRepr):
    method __init__ (line 87) | def __init__(self, meta_info=None, data=None):
    method set_meta_info (line 97) | def set_meta_info(self, meta_info):
    method set_data (line 129) | def set_data(self, data):
    method new (line 141) | def new(self, meta_info=None, data=None):
    method keys (line 159) | def keys(self):
    method meta_info_keys (line 166) | def meta_info_keys(self):
    method values (line 173) | def values(self):
    method meta_info_values (line 180) | def meta_info_values(self):
    method items (line 187) | def items(self):
    method meta_info_items (line 191) | def meta_info_items(self):
    method __setattr__ (line 195) | def __setattr__(self, name, val):
    method __delattr__ (line 211) | def __delattr__(self, item):
    method __getitem__ (line 228) | def __getitem__(self, name):
    method get (line 231) | def get(self, *args):
    method pop (line 235) | def pop(self, *args):
    method __contains__ (line 252) | def __contains__(self, item):
    method to (line 257) | def to(self, *args, **kwargs):
    method cpu (line 267) | def cpu(self):
    method npu (line 277) | def npu(self):
    method mlu (line 287) | def mlu(self):
    method cuda (line 297) | def cuda(self):
    method detach (line 307) | def detach(self):
    method numpy (line 317) | def numpy(self):
    method __nice__ (line 326) | def __nice__(self):

FILE: mmdet/core/data_structures/instance_data.py
  class InstanceData (line 10) | class InstanceData(GeneralData):
    method __setattr__ (line 66) | def __setattr__(self, name, value):
    method __getitem__ (line 90) | def __getitem__(self, item):
    method cat (line 151) | def cat(instances_list):
    method __len__ (line 183) | def __len__(self):

FILE: mmdet/core/evaluation/bbox_overlaps.py
  function bbox_overlaps (line 5) | def bbox_overlaps(bboxes1,

FILE: mmdet/core/evaluation/class_names.py
  function wider_face_classes (line 5) | def wider_face_classes():
  function voc_classes (line 9) | def voc_classes():
  function imagenet_det_classes (line 17) | def imagenet_det_classes():
  function imagenet_vid_classes (line 58) | def imagenet_vid_classes():
  function coco_classes (line 68) | def coco_classes():
  function cityscapes_classes (line 86) | def cityscapes_classes():
  function oid_challenge_classes (line 93) | def oid_challenge_classes():
  function oid_v6_classes (line 188) | def oid_v6_classes():
  function objects365v1_classes (line 306) | def objects365v1_classes():
  function objects365v2_classes (line 376) | def objects365v2_classes():
  function get_classes (line 462) | def get_classes(dataset):

FILE: mmdet/core/evaluation/eval_hooks.py
  function _calc_dynamic_intervals (line 12) | def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
  class EvalHook (line 24) | class EvalHook(BaseEvalHook):
    method __init__ (line 26) | def __init__(self, *args, dynamic_intervals=None, **kwargs):
    method _decide_interval (line 35) | def _decide_interval(self, runner):
    method before_train_epoch (line 42) | def before_train_epoch(self, runner):
    method before_train_iter (line 47) | def before_train_iter(self, runner):
    method _do_evaluate (line 51) | def _do_evaluate(self, runner):
  class DistEvalHook (line 73) | class DistEvalHook(BaseDistEvalHook):
    method __init__ (line 75) | def __init__(self, *args, dynamic_intervals=None, **kwargs):
    method _decide_interval (line 84) | def _decide_interval(self, runner):
    method before_train_epoch (line 91) | def before_train_epoch(self, runner):
    method before_train_iter (line 96) | def before_train_iter(self, runner):
    method _do_evaluate (line 100) | def _do_evaluate(self, runner):

FILE: mmdet/core/evaluation/mean_ap.py
  function average_precision (line 13) | def average_precision(recalls, precisions, mode='area'):
  function tpfp_imagenet (line 60) | def tpfp_imagenet(det_bboxes,
  function tpfp_default (line 169) | def tpfp_default(det_bboxes,
  function tpfp_openimages (line 272) | def tpfp_openimages(det_bboxes,
  function get_cls_results (line 477) | def get_cls_results(det_results, annotations, class_id):
  function get_cls_group_ofs (line 504) | def get_cls_group_ofs(annotations, class_id):
  function eval_map (line 525) | def eval_map(det_results,
  function print_map_summary (line 717) | def print_map_summary(mean_ap,

FILE: mmdet/core/evaluation/recall.py
  function _recalls (line 11) | def _recalls(all_ious, proposal_nums, thrs):
  function set_recall_param (line 44) | def set_recall_param(proposal_nums, iou_thrs):
  function eval_recalls (line 65) | def eval_recalls(gts,
  function print_recall_summary (line 117) | def print_recall_summary(recalls,
  function plot_num_recall (line 150) | def plot_num_recall(recalls, proposal_nums):
  function plot_iou_recall (line 175) | def plot_iou_recall(recalls, iou_thrs):

FILE: mmdet/core/export/model_wrappers.py
  class DeployBaseDetector (line 12) | class DeployBaseDetector(BaseDetector):
    method __init__ (line 15) | def __init__(self, class_names, device_id):
    method simple_test (line 20) | def simple_test(self, img, img_metas, **kwargs):
    method aug_test (line 23) | def aug_test(self, imgs, img_metas, **kwargs):
    method extract_feat (line 26) | def extract_feat(self, imgs):
    method forward_train (line 29) | def forward_train(self, imgs, img_metas, **kwargs):
    method val_step (line 32) | def val_step(self, data, optimizer):
    method train_step (line 35) | def train_step(self, data, optimizer):
    method forward_test (line 38) | def forward_test(self, *, img, img_metas, **kwargs):
    method async_simple_test (line 41) | def async_simple_test(self, img, img_metas, **kwargs):
    method forward (line 44) | def forward(self, img, img_metas, return_loss=True, **kwargs):
  class ONNXRuntimeDetector (line 96) | class ONNXRuntimeDetector(DeployBaseDetector):
    method __init__ (line 99) | def __init__(self, onnx_file, class_names, device_id):
    method forward_test (line 130) | def forward_test(self, imgs, img_metas, **kwargs):
  class TensorRTDetector (line 152) | class TensorRTDetector(DeployBaseDetector):
    method __init__ (line 155) | def __init__(self, engine_file, class_names, device_id, output_names=N...
    method forward_test (line 177) | def forward_test(self, imgs, img_metas, **kwargs):

FILE: mmdet/core/export/onnx_helper.py
  function dynamic_clip_for_onnx (line 7) | def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):
  function get_k_for_topk (line 46) | def get_k_for_topk(k, size):
  function add_dummy_nms_for_onnx (line 82) | def add_dummy_nms_for_onnx(boxes,
  class DummyONNXNMSop (line 201) | class DummyONNXNMSop(torch.autograd.Function):
    method forward (line 208) | def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_thresh...
    method symbolic (line 214) | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold,

FILE: mmdet/core/export/pytorch2onnx.py
  function generate_inputs_and_wrap_model (line 10) | def generate_inputs_and_wrap_model(config_path,
  function build_model_from_cfg (line 65) | def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
  function preprocess_example_input (line 102) | def preprocess_example_input(input_config):

FILE: mmdet/core/hook/checkloss_hook.py
  class CheckInvalidLossHook (line 7) | class CheckInvalidLossHook(Hook):
    method __init__ (line 18) | def __init__(self, interval=50):
    method after_train_iter (line 21) | def after_train_iter(self, runner):

FILE: mmdet/core/hook/ema.py
  class BaseEMAHook (line 8) | class BaseEMAHook(Hook):
    method __init__ (line 32) | def __init__(self,
    method before_run (line 45) | def before_run(self, runner):
    method get_momentum (line 67) | def get_momentum(self, runner):
    method after_train_iter (line 71) | def after_train_iter(self, runner):
    method after_train_epoch (line 84) | def after_train_epoch(self, runner):
    method before_train_epoch (line 89) | def before_train_epoch(self, runner):
    method _swap_ema_parameters (line 94) | def _swap_ema_parameters(self):
  class ExpMomentumEMAHook (line 104) | class ExpMomentumEMAHook(BaseEMAHook):
    method __init__ (line 112) | def __init__(self, total_iter=2000, **kwargs):
  class LinearMomentumEMAHook (line 119) | class LinearMomentumEMAHook(BaseEMAHook):
    method __init__ (line 127) | def __init__(self, warm_up=100, **kwargs):

FILE: mmdet/core/hook/memory_profiler_hook.py
  class MemoryProfilerHook (line 6) | class MemoryProfilerHook(Hook):
    method __init__ (line 15) | def __init__(self, interval=50):
    method after_iter (line 34) | def after_iter(self, runner):

FILE: mmdet/core/hook/set_epoch_info_hook.py
  class SetEpochInfoHook (line 7) | class SetEpochInfoHook(Hook):
    method before_train_epoch (line 10) | def before_train_epoch(self, runner):

FILE: mmdet/core/hook/sync_norm_hook.py
  function get_norm_states (line 11) | def get_norm_states(module):
  class SyncNormHook (line 21) | class SyncNormHook(Hook):
    method __init__ (line 30) | def __init__(self, num_last_epochs=15, interval=1):
    method before_train_epoch (line 34) | def before_train_epoch(self, runner):
    method after_train_epoch (line 40) | def after_train_epoch(self, runner):

FILE: mmdet/core/hook/sync_random_size_hook.py
  class SyncRandomSizeHook (line 12) | class SyncRandomSizeHook(Hook):
    method __init__ (line 33) | def __init__(self,
    method after_train_epoch (line 52) | def after_train_epoch(self, runner):

FILE: mmdet/core/hook/wandblogger_hook.py
  class MMDetWandbHook (line 21) | class MMDetWandbHook(WandbLoggerHook):
    method __init__ (line 95) | def __init__(self,
    method import_wandb (line 114) | def import_wandb(self):
    method before_run (line 134) | def before_run(self, runner):
    method after_train_epoch (line 206) | def after_train_epoch(self, runner):
    method after_train_iter (line 240) | def after_train_iter(self, runner):
    method _after_train_iter (line 252) | def _after_train_iter(self, runner):
    method after_run (line 283) | def after_run(self, runner):
    method _update_wandb_config (line 286) | def _update_wandb_config(self, runner):
    method _log_ckpt_as_artifact (line 298) | def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None):
    method _get_eval_results (line 311) | def _get_eval_results(self):
    method _init_data_table (line 318) | def _init_data_table(self):
    method _init_pred_table (line 323) | def _init_pred_table(self):
    method _add_ground_truth (line 328) | def _add_ground_truth(self, runner):
    method _log_predictions (line 403) | def _log_predictions(self, results):
    method _get_wandb_bboxes (line 463) | def _get_wandb_bboxes(self, bboxes, labels, log_gt=True):
    method _get_wandb_masks (line 515) | def _get_wandb_masks(self,
    method _log_data_table (line 563) | def _log_data_table(self):
    method _log_eval_table (line 580) | def _log_eval_table(self, idx):

FILE: mmdet/core/hook/yolox_lrupdater_hook.py
  class YOLOXLrUpdaterHook (line 8) | class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
    method __init__ (line 23) | def __init__(self, num_last_epochs, **kwargs):
    method get_warmup_lr (line 27) | def get_warmup_lr(self, cur_iters):
    method get_lr (line 44) | def get_lr(self, runner, base_lr):

FILE: mmdet/core/hook/yolox_mode_switch_hook.py
  class YOLOXModeSwitchHook (line 7) | class YOLOXModeSwitchHook(Hook):
    method __init__ (line 21) | def __init__(self,
    method before_train_epoch (line 28) | def before_train_epoch(self, runner):

FILE: mmdet/core/mask/mask_target.py
  function mask_target (line 7) | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_...
  function mask_target_single (line 67) | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):

FILE: mmdet/core/mask/structures.py
  class BaseInstanceMasks (line 12) | class BaseInstanceMasks(metaclass=ABCMeta):
    method rescale (line 16) | def rescale(self, scale, interpolation='nearest'):
    method resize (line 29) | def resize(self, out_shape, interpolation='nearest'):
    method flip (line 41) | def flip(self, flip_direction='horizontal'):
    method pad (line 52) | def pad(self, out_shape, pad_val):
    method crop (line 64) | def crop(self, bbox):
    method crop_and_resize (line 75) | def crop_and_resize(self,
    method expand (line 104) | def expand(self, expanded_h, expanded_w, top, left):
    method areas (line 109) | def areas(self):
    method to_ndarray (line 113) | def to_ndarray(self):
    method to_tensor (line 121) | def to_tensor(self, dtype, device):
    method translate (line 133) | def translate(self,
    method shear (line 153) | def shear(self,
    method rotate (line 175) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
  class BitmapMasks (line 193) | class BitmapMasks(BaseInstanceMasks):
    method __init__ (line 222) | def __init__(self, masks, height, width):
    method __getitem__ (line 239) | def __getitem__(self, index):
    method __iter__ (line 251) | def __iter__(self):
    method __repr__ (line 254) | def __repr__(self):
    method __len__ (line 261) | def __len__(self):
    method rescale (line 265) | def rescale(self, scale, interpolation='nearest'):
    method resize (line 278) | def resize(self, out_shape, interpolation='nearest'):
    method flip (line 290) | def flip(self, flip_direction='horizontal'):
    method pad (line 303) | def pad(self, out_shape, pad_val=0):
    method crop (line 314) | def crop(self, bbox):
    method crop_and_resize (line 333) | def crop_and_resize(self,
    method expand (line 369) | def expand(self, expanded_h, expanded_w, top, left):
    method translate (line 381) | def translate(self,
    method shear (line 431) | def shear(self,
    method rotate (line 466) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
    method areas (line 499) | def areas(self):
    method to_ndarray (line 503) | def to_ndarray(self):
    method to_tensor (line 507) | def to_tensor(self, dtype, device):
    method random (line 512) | def random(cls,
    method get_bboxes (line 532) | def get_bboxes(self):
  class PolygonMasks (line 548) | class PolygonMasks(BaseInstanceMasks):
    method __init__ (line 588) | def __init__(self, masks, height, width):
    method __getitem__ (line 598) | def __getitem__(self, index):
    method __iter__ (line 621) | def __iter__(self):
    method __repr__ (line 624) | def __repr__(self):
    method __len__ (line 631) | def __len__(self):
    method rescale (line 635) | def rescale(self, scale, interpolation=None):
    method resize (line 644) | def resize(self, out_shape, interpolation=None):
    method flip (line 663) | def flip(self, flip_direction='horizontal'):
    method crop (line 687) | def crop(self, bbox):
    method pad (line 716) | def pad(self, out_shape, pad_val=0):
    method expand (line 720) | def expand(self, *args, **kwargs):
    method crop_and_resize (line 724) | def crop_and_resize(self,
    method translate (line 765) | def translate(self,
    method shear (line 799) | def shear(self,
    method rotate (line 831) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
    method to_bitmap (line 860) | def to_bitmap(self):
    method areas (line 866) | def areas(self):
    method _polygon_area (line 884) | def _polygon_area(self, x, y):
    method to_ndarray (line 900) | def to_ndarray(self):
    method to_tensor (line 910) | def to_tensor(self, dtype, device):
    method random (line 920) | def random(cls,
    method get_bboxes (line 1039) | def get_bboxes(self):
  function polygon_to_bitmap (line 1058) | def polygon_to_bitmap(polygons, height, width):
  function bitmap_to_polygon (line 1075) | def bitmap_to_polygon(bitmap):

FILE: mmdet/core/mask/utils.py
  function split_combined_polys (line 8) | def split_combined_polys(polys, poly_lens, polys_per_mask):
  function encode_mask_results (line 38) | def encode_mask_results(mask_results):
  function mask2bbox (line 68) | def mask2bbox(masks):

FILE: mmdet/core/optimizers/builder.py
  function build_optimizer_constructor (line 11) | def build_optimizer_constructor(cfg):
  function build_optimizer (line 22) | def build_optimizer(model, cfg):

FILE: mmdet/core/optimizers/layer_decay_optimizer_constructor.py
  function get_layer_id_for_convnext (line 10) | def get_layer_id_for_convnext(var_name, max_layer_id):
  function get_stage_id_for_convnext (line 53) | def get_stage_id_for_convnext(var_name, max_stage_id):
  class LearningRateDecayOptimizerConstructor (line 79) | class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
    method add_params (line 83) | def add_params(self, params, module, **kwargs):

FILE: mmdet/core/post_processing/bbox_nms.py
  function multiclass_nms (line 8) | def multiclass_nms(multi_bboxes,
  function fast_nms (line 98) | def fast_nms(multi_bboxes,

FILE: mmdet/core/post_processing/matrix_nms.py
  function mask_matrix_nms (line 5) | def mask_matrix_nms(masks,

FILE: mmdet/core/post_processing/merge_augs.py
  function merge_aug_proposals (line 13) | def merge_aug_proposals(aug_proposals, img_metas, cfg):
  function merge_aug_bboxes (line 84) | def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
  function merge_aug_scores (line 113) | def merge_aug_scores(aug_scores):
  function merge_aug_masks (line 121) | def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):

FILE: mmdet/core/utils/dist_utils.py
  function _allreduce_coalesced (line 15) | def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
  function allreduce_grads (line 37) | def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
  class DistOptimizerHook (line 59) | class DistOptimizerHook(OptimizerHook):
    method __init__ (line 62) | def __init__(self, *args, **kwargs):
  function reduce_mean (line 68) | def reduce_mean(tensor):
  function obj2tensor (line 77) | def obj2tensor(pyobj, device='cuda'):
  function tensor2obj (line 83) | def tensor2obj(tensor):
  function _get_global_gloo_group (line 89) | def _get_global_gloo_group():
  function all_reduce_dict (line 98) | def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
  function sync_random_seed (line 157) | def sync_random_seed(seed=None, device='cuda'):

FILE: mmdet/core/utils/misc.py
  function multi_apply (line 11) | def multi_apply(func, *args, **kwargs):
  function unmap (line 33) | def unmap(data, count, inds, fill=0):
  function mask2ndarray (line 46) | def mask2ndarray(mask):
  function flip_tensor (line 65) | def flip_tensor(src_tensor, flip_direction):
  function select_single_mlvl (line 88) | def select_single_mlvl(mlvl_tensors, batch_id, detach=True):
  function filter_scores_and_topk (line 119) | def filter_scores_and_topk(scores, score_thr, topk, results=None):
  function center_of_mass (line 168) | def center_of_mass(mask, esp=1e-6):
  function generate_coordinate (line 190) | def generate_coordinate(featmap_sizes, device='cuda'):

FILE: mmdet/core/visualization/image.py
  function color_val_matplotlib (line 25) | def color_val_matplotlib(color):
  function _get_adaptive_scales (line 40) | def _get_adaptive_scales(areas, min_area=800, max_area=30000):
  function _get_bias_color (line 63) | def _get_bias_color(base, max_dist=30):
  function draw_bboxes (line 81) | def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
  function draw_labels (line 114) | def draw_labels(ax,
  function draw_masks (line 166) | def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
  function imshow_det_bboxes (line 208) | def imshow_det_bboxes(img,
  function imshow_gt_det_bboxes (line 380) | def imshow_gt_det_bboxes(img,

FILE: mmdet/core/visualization/palette.py
  function palette_val (line 6) | def palette_val(palette):
  function get_palette (line 22) | def get_palette(palette, num_classes):

FILE: mmdet/datasets/api_wrappers/coco_api.py
  class COCO (line 11) | class COCO(_COCO):
    method __init__ (line 18) | def __init__(self, annotation_file=None):
    method get_ann_ids (line 27) | def get_ann_ids(self, img_ids=[], cat_ids=[], area_rng=[], iscrowd=None):
    method get_cat_ids (line 30) | def get_cat_ids(self, cat_names=[], sup_names=[], cat_ids=[]):
    method get_img_ids (line 33) | def get_img_ids(self, img_ids=[], cat_ids=[]):
    method load_anns (line 36) | def load_anns(self, ids):
    method load_cats (line 39) | def load_cats(self, ids):
    method load_imgs (line 42) | def load_imgs(self, ids):

FILE: mmdet/datasets/api_wrappers/panoptic_evaluation.py
  function pq_compute_single_core (line 23) | def pq_compute_single_core(proc_id,
  function pq_compute_multi_core (line 173) | def pq_compute_multi_core(matched_annotations_list,

FILE: mmdet/datasets/builder.py
  function _concat_dataset (line 32) | def _concat_dataset(cfg, default_args=None):
  function build_dataset (line 59) | def build_dataset(cfg, default_args=None):
  function build_dataloader (line 87) | def build_dataloader(dataset,
  function worker_init_fn (line 209) | def worker_init_fn(worker_id, num_workers, rank, seed):

FILE: mmdet/datasets/cityscapes.py
  class CityscapesDataset (line 21) | class CityscapesDataset(CocoDataset):
    method _filter_imgs (line 29) | def _filter_imgs(self, min_size=32):
    method _parse_ann_info (line 57) | def _parse_ann_info(self, img_info, ann_info):
    method results2txt (line 111) | def results2txt(self, results, outfile_prefix):
    method format_results (line 177) | def format_results(self, results, txtfile_prefix=None):
    method evaluate (line 211) | def evaluate(self,
    method _evaluate_cityscapes (line 279) | def _evaluate_cityscapes(self, results, txtfile_prefix, logger):

FILE: mmdet/datasets/coco.py
  class CocoDataset (line 23) | class CocoDataset(CustomDataset):
    method load_annotations (line 62) | def load_annotations(self, ann_file):
    method get_ann_info (line 91) | def get_ann_info(self, idx):
    method get_cat_ids (line 106) | def get_cat_ids(self, idx):
    method _filter_imgs (line 121) | def _filter_imgs(self, min_size=32):
    method _parse_ann_info (line 145) | def _parse_ann_info(self, img_info, ann_info):
    method xyxy2xywh (line 204) | def xyxy2xywh(self, bbox):
    method _proposal2json (line 224) | def _proposal2json(self, results):
    method _det2json (line 239) | def _det2json(self, results):
    method _segm2json (line 256) | def _segm2json(self, results):
    method results2json (line 294) | def results2json(self, results, outfile_prefix):
    method fast_eval_recall (line 334) | def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=No...
    method format_results (line 358) | def format_results(self, results, jsonfile_prefix=None, **kwargs):
    method evaluate_det_segm (line 386) | def evaluate_det_segm(self,
    method evaluate (line 592) | def evaluate(self,

FILE: mmdet/datasets/coco_occluded.py
  class OccludedSeparatedCocoDataset (line 16) | class OccludedSeparatedCocoDataset(CocoDataset):
    method __init__ (line 49) | def __init__(
    method evaluate (line 66) | def evaluate(self,
    method evaluate_occluded_separated (line 92) | def evaluate_occluded_separated(self,
    method compute_recall (line 156) | def compute_recall(self,
    method mask_iou (line 213) | def mask_iou(self, mask1, mask2):

FILE: mmdet/datasets/coco_panoptic.py
  class COCOPanoptic (line 28) | class COCOPanoptic(COCO):
    method __init__ (line 37) | def __init__(self, annotation_file=None):
    method createIndex (line 46) | def createIndex(self):
    method load_anns (line 89) | def load_anns(self, ids=[]):
  class CocoPanopticDataset (line 114) | class CocoPanopticDataset(CocoDataset):
    method __init__ (line 259) | def __init__(self,
    method load_annotations (line 284) | def load_annotations(self, ann_file):
    method get_ann_info (line 306) | def get_ann_info(self, idx):
    method _parse_ann_info (line 322) | def _parse_ann_info(self, img_info, ann_info):
    method _filter_imgs (line 385) | def _filter_imgs(self, min_size=32):
    method _pan2json (line 410) | def _pan2json(self, results, outfile_prefix):
    method results2json (line 452) | def results2json(self, results, outfile_prefix):
    method evaluate_pan_json (line 503) | def evaluate_pan_json(self,
    method evaluate (line 568) | def evaluate(self,
  function parse_pq_results (line 641) | def parse_pq_results(pq_results):
  function print_panoptic_table (line 656) | def print_panoptic_table(pq_results, classwise_results=None, logger=None):

FILE: mmdet/datasets/custom.py
  class CustomDataset (line 18) | class CustomDataset(Dataset):
    method __init__ (line 59) | def __init__(self,
    method __len__ (line 133) | def __len__(self):
    method load_annotations (line 137) | def load_annotations(self, ann_file):
    method load_proposals (line 141) | def load_proposals(self, proposal_file):
    method get_ann_info (line 145) | def get_ann_info(self, idx):
    method get_cat_ids (line 157) | def get_cat_ids(self, idx):
    method pre_pipeline (line 169) | def pre_pipeline(self, results):
    method _filter_imgs (line 178) | def _filter_imgs(self, min_size=32):
    method _set_group_flag (line 189) | def _set_group_flag(self):
    method _rand_another (line 201) | def _rand_another(self, idx):
    method __getitem__ (line 206) | def __getitem__(self, idx):
    method prepare_train_img (line 226) | def prepare_train_img(self, idx):
    method prepare_test_img (line 245) | def prepare_test_img(self, idx):
    method get_classes (line 264) | def get_classes(cls, classes=None):
    method get_cat2imgs (line 290) | def get_cat2imgs(self):
    method format_results (line 309) | def format_results(self, results, **kwargs):
    method evaluate (line 312) | def evaluate(self,
    method __repr__ (line 371) | def __repr__(self):

FILE: mmdet/datasets/dataset_wrappers.py
  class ConcatDataset (line 17) | class ConcatDataset(_ConcatDataset):
    method __init__ (line 30) | def __init__(self, datasets, separate_eval=True):
    method get_cat_ids (line 50) | def get_cat_ids(self, idx):
    method get_ann_info (line 72) | def get_ann_info(self, idx):
    method evaluate (line 94) | def evaluate(self, results, logger=None, **kwargs):
  class RepeatDataset (line 154) | class RepeatDataset:
    method __init__ (line 167) | def __init__(self, dataset, times):
    method __getitem__ (line 177) | def __getitem__(self, idx):
    method get_cat_ids (line 180) | def get_cat_ids(self, idx):
    method get_ann_info (line 192) | def get_ann_info(self, idx):
    method __len__ (line 204) | def __len__(self):
  class ClassBalancedDataset (line 211) | class ClassBalancedDataset:
    method __init__ (line 247) | def __init__(self, dataset, oversample_thr, filter_empty_gt=True):
    method _get_repeat_factors (line 267) | def _get_repeat_factors(self, dataset, repeat_thr):
    method __getitem__ (line 316) | def __getitem__(self, idx):
    method get_ann_info (line 320) | def get_ann_info(self, idx):
    method __len__ (line 332) | def __len__(self):
  class MultiImageMixDataset (line 338) | class MultiImageMixDataset:
    method __init__ (line 362) | def __init__(self,
    method __len__ (line 398) | def __len__(self):
    method __getitem__ (line 401) | def __getitem__(self, idx):
    method update_skip_type_keys (line 446) | def update_skip_type_keys(self, skip_type_keys):

FILE: mmdet/datasets/deepfashion.py
  class DeepFashionDataset (line 7) | class DeepFashionDataset(CocoDataset):

FILE: mmdet/datasets/lvis.py
  class LVISV05Dataset (line 18) | class LVISV05Dataset(CocoDataset):
    method load_annotations (line 272) | def load_annotations(self, ann_file):
    method evaluate (line 311) | def evaluate(self,
  class LVISV1Dataset (line 475) | class LVISV1Dataset(LVISDataset):
    method load_annotations (line 717) | def load_annotations(self, ann_file):

FILE: mmdet/datasets/objects365.py
  class Objects365V1Dataset (line 17) | class Objects365V1Dataset(CocoDataset):
    method load_annotations (line 88) | def load_annotations(self, ann_file):
  class Objects365V2Dataset (line 127) | class Objects365V2Dataset(CocoDataset):
    method load_annotations (line 199) | def load_annotations(self, ann_file):

FILE: mmdet/datasets/openimages.py
  class OpenImagesDataset (line 21) | class OpenImagesDataset(CustomDataset):
    method __init__ (line 63) | def __init__(self,
    method get_classes_from_csv (line 127) | def get_classes_from_csv(self, label_file):
    method load_annotations (line 150) | def load_annotations(self, ann_file):
    method get_ann_info (line 220) | def get_ann_info(self, idx):
    method get_meta_from_file (line 291) | def get_meta_from_file(self, meta_file=''):
    method get_meta_from_pipeline (line 308) | def get_meta_from_pipeline(self, results):
    method get_img_shape (line 319) | def get_img_shape(self, metas):
    method prepare_test_img (line 332) | def prepare_test_img(self, idx):
    method _filter_imgs (line 344) | def _filter_imgs(self, min_size=32):
    method _set_group_flag (line 352) | def _set_group_flag(self):
    method get_relation_matrix (line 357) | def get_relation_matrix(self, hierarchy_file):
    method _convert_hierarchy_tree (line 380) | def _convert_hierarchy_tree(self,
    method add_supercategory_ann (line 426) | def add_supercategory_ann(self, annotations):
    method process_results (line 457) | def process_results(self, det_results, annotations,
    method load_image_label_from_csv (line 503) | def load_image_label_from_csv(self, image_level_ann_file):
    method get_image_level_ann (line 538) | def get_image_level_ann(self, image_level_ann_file):
    method denormalize_gt_bboxes (line 591) | def denormalize_gt_bboxes(self, annotations):
    method get_cat_ids (line 604) | def get_cat_ids(self, idx):
    method evaluate (line 615) | def evaluate(self,
  class OpenImagesChallengeDataset (line 727) | class OpenImagesChallengeDataset(OpenImagesDataset):
    method __init__ (line 730) | def __init__(self, ann_file, **kwargs):
    method get_classes_from_csv (line 735) | def get_classes_from_csv(self, label_file):
    method load_annotations (line 765) | def load_annotations(self, ann_file):
    method prepare_train_img (line 805) | def prepare_train_img(self, idx):
    method prepare_test_img (line 817) | def prepare_test_img(self, idx):
    method get_relation_matrix (line 830) | def get_relation_matrix(self, hierarchy_file):
    method get_ann_info (line 844) | def get_ann_info(self, idx):
    method load_image_label_from_csv (line 857) | def load_image_label_from_csv(self, image_level_ann_file):

FILE: mmdet/datasets/pipelines/auto_augment.py
  function level_to_value (line 14) | def level_to_value(level, max_value):
  function enhance_level_to_value (line 19) | def enhance_level_to_value(level, a=1.8, b=0.1):
  function random_negative (line 24) | def random_negative(value, random_negative_prob):
  function bbox2fields (line 29) | def bbox2fields():
  class AutoAugment (line 47) | class AutoAugment:
    method __init__ (line 90) | def __init__(self, policies):
    method __call__ (line 104) | def __call__(self, results):
    method __repr__ (line 108) | def __repr__(self):
  class Shear (line 113) | class Shear:
    method __init__ (line 136) | def __init__(self,
    method _shear_img (line 180) | def _shear_img(self,
    method _shear_bboxes (line 205) | def _shear_bboxes(self, results, magnitude):
    method _shear_masks (line 235) | def _shear_masks(self,
    method _shear_seg (line 251) | def _shear_seg(self,
    method _filter_invalid (line 267) | def _filter_invalid(self, results, min_bbox_size=0):
    method __call__ (line 286) | def __call__(self, results):
    method __repr__ (line 317) | def __repr__(self):
  class Rotate (line 331) | class Rotate:
    method __init__ (line 357) | def __init__(self,
    method _rotate_img (line 408) | def _rotate_img(self, results, angle, center=None, scale=1.0):
    method _rotate_bboxes (line 427) | def _rotate_bboxes(self, results, rotate_matrix):
    method _rotate_masks (line 462) | def _rotate_masks(self,
    method _rotate_seg (line 474) | def _rotate_seg(self,
    method _filter_invalid (line 487) | def _filter_invalid(self, results, min_bbox_size=0):
    method __call__ (line 506) | def __call__(self, results):
    method __repr__ (line 532) | def __repr__(self):
  class Translate (line 546) | class Translate:
    method __init__ (line 572) | def __init__(self,
    method _translate_img (line 613) | def _translate_img(self, results, offset, direction='horizontal'):
    method _translate_bboxes (line 628) | def _translate_bboxes(self, results, offset):
    method _translate_masks (line 646) | def _translate_masks(self,
    method _translate_seg (line 657) | def _translate_seg(self,
    method _filter_invalid (line 668) | def _filter_invalid(self, results, min_size=0):
    method __call__ (line 687) | def __call__(self, results):
  class ColorTransform (line 713) | class ColorTransform:
    method __init__ (line 722) | def __init__(self, level, prob=0.5):
    method _adjust_color_img (line 733) | def _adjust_color_img(self, results, factor=1.0):
    method __call__ (line 740) | def __call__(self, results):
    method __repr__ (line 754) | def __repr__(self):
  class EqualizeTransform (line 762) | class EqualizeTransform:
    method __init__ (line 770) | def __init__(self, prob=0.5):
    method _imequalize (line 775) | def _imequalize(self, results):
    method __call__ (line 781) | def __call__(self, results):
    method __repr__ (line 795) | def __repr__(self):
  class BrightnessTransform (line 801) | class BrightnessTransform:
    method __init__ (line 810) | def __init__(self, level, prob=0.5):
    method _adjust_brightness_img (line 821) | def _adjust_brightness_img(self, results, factor=1.0):
    method __call__ (line 828) | def __call__(self, results):
    method __repr__ (line 842) | def __repr__(self):
  class ContrastTransform (line 850) | class ContrastTransform:
    method __init__ (line 859) | def __init__(self, level, prob=0.5):
    method _adjust_contrast_img (line 870) | def _adjust_contrast_img(self, results, factor=1.0):
    method __call__ (line 876) | def __call__(self, results):
    method __repr__ (line 890) | def __repr__(self):

FILE: mmdet/datasets/pipelines/compose.py
  class Compose (line 10) | class Compose:
    method __init__ (line 18) | def __init__(self, transforms):
    method __call__ (line 30) | def __call__(self, data):
    method __repr__ (line 46) | def __repr__(self):

FILE: mmdet/datasets/pipelines/formatting.py
  function to_tensor (line 12) | def to_tensor(data):
  class ToTensor (line 38) | class ToTensor:
    method __init__ (line 45) | def __init__(self, keys):
    method __call__ (line 48) | def __call__(self, results):
    method __repr__ (line 62) | def __repr__(self):
  class ImageToTensor (line 67) | class ImageToTensor:
    method __init__ (line 78) | def __init__(self, keys):
    method __call__ (line 81) | def __call__(self, results):
    method __repr__ (line 99) | def __repr__(self):
  class Transpose (line 104) | class Transpose:
    method __init__ (line 112) | def __init__(self, keys, order):
    method __call__ (line 116) | def __call__(self, results):
    method __repr__ (line 130) | def __repr__(self):
  class ToDataContainer (line 136) | class ToDataContainer:
    method __init__ (line 147) | def __init__(self,
    method __call__ (line 152) | def __call__(self, results):
    method __repr__ (line 170) | def __repr__(self):
  class DefaultFormatBundle (line 175) | class DefaultFormatBundle:
    method __init__ (line 200) | def __init__(self,
    method __call__ (line 206) | def __call__(self, results):
    method _add_default_meta_keys (line 259) | def _add_default_meta_keys(self, results):
    method __repr__ (line 284) | def __repr__(self):
  class Collect (line 290) | class Collect:
    method __init__ (line 329) | def __init__(self,
    method __call__ (line 337) | def __call__(self, results):
    method __repr__ (line 360) | def __repr__(self):
  class WrapFieldsToLists (line 366) | class WrapFieldsToLists:
    method __call__ (line 386) | def __call__(self, results):
    method __repr__ (line 402) | def __repr__(self):

FILE: mmdet/datasets/pipelines/instaboost.py
  class InstaBoost (line 8) | class InstaBoost:
    method __init__ (line 35) | def __init__(self,
    method _load_anns (line 56) | def _load_anns(self, results):
    method _parse_anns (line 78) | def _parse_anns(self, results, anns, img):
    method __call__ (line 99) | def __call__(self, results):
    method __repr__ (line 115) | def __repr__(self):

FILE: mmdet/datasets/pipelines/loading.py
  class LoadImageFromFile (line 18) | class LoadImageFromFile:
    method __init__ (line 37) | def __init__(self,
    method __call__ (line 48) | def __call__(self, results):
    method __repr__ (line 81) | def __repr__(self):
  class LoadImageFromWebcam (line 91) | class LoadImageFromWebcam(LoadImageFromFile):
    method __call__ (line 98) | def __call__(self, results):
  class LoadMultiChannelImageFromFiles (line 123) | class LoadMultiChannelImageFromFiles:
    method __init__ (line 143) | def __init__(self,
    method __call__ (line 152) | def __call__(self, results):
    method __repr__ (line 197) | def __repr__(self):
  class LoadAnnotations (line 206) | class LoadAnnotations:
    method __init__ (line 228) | def __init__(self,
    method _load_bboxes (line 245) | def _load_bboxes(self, results):
    method _load_labels (line 277) | def _load_labels(self, results):
    method _poly2mask (line 290) | def _poly2mask(self, mask_ann, img_h, img_w):
    method process_polygons (line 317) | def process_polygons(self, polygons):
    method _load_masks (line 334) | def _load_masks(self, results):
    method _load_semantic_seg (line 359) | def _load_semantic_seg(self, results):
    method __call__ (line 380) | def __call__(self, results):
    method __repr__ (line 403) | def __repr__(self):
  class LoadPanopticAnnotations (line 415) | class LoadPanopticAnnotations(LoadAnnotations):
    method __init__ (line 432) | def __init__(self,
    method _load_masks_and_semantic_segs (line 453) | def _load_masks_and_semantic_segs(self, results):
    method __call__ (line 500) | def __call__(self, results):
  class LoadProposals (line 526) | class LoadProposals:
    method __init__ (line 536) | def __init__(self, num_max_proposals=None):
    method __call__ (line 539) | def __call__(self, results):
    method __repr__ (line 565) | def __repr__(self):
  class FilterAnnotations (line 571) | class FilterAnnotations:
    method __init__ (line 587) | def __init__(self,
    method __call__ (line 601) | def __call__(self, results):
    method __repr__ (line 639) | def __repr__(self):

FILE: mmdet/datasets/pipelines/test_time_aug.py
  class MultiScaleFlipAug (line 11) | class MultiScaleFlipAug:
    method __init__ (line 54) | def __init__(self,
    method __call__ (line 85) | def __call__(self, results):
    method __repr__ (line 116) | def __repr__(self):

FILE: mmdet/datasets/pipelines/transforms.py
  class Resize (line 31) | class Resize:
    method __init__ (line 76) | def __init__(self,
    method random_select (line 111) | def random_select(img_scales):
    method random_sample (line 129) | def random_sample(img_scales):
    method random_sample_ratio (line 156) | def random_sample_ratio(img_scale, ratio_range):
    method _random_scale (line 182) | def _random_scale(self, results):
    method _resize_img (line 215) | def _resize_img(self, results):
    method _resize_bboxes (line 248) | def _resize_bboxes(self, results):
    method _resize_masks (line 258) | def _resize_masks(self, results):
    method _resize_seg (line 268) | def _resize_seg(self, results):
    method __call__ (line 285) | def __call__(self, results):
    method __repr__ (line 322) | def __repr__(self):
  class RandomFlip (line 333) | class RandomFlip:
    method __init__ (line 370) | def __init__(self, flip_ratio=None, direction='horizontal'):
    method bbox_flip (line 396) | def bbox_flip(self, bboxes, img_shape, direction):
    method __call__ (line 430) | def __call__(self, results):
    method __repr__ (line 485) | def __repr__(self):
  class RandomShift (line 490) | class RandomShift:
    method __init__ (line 501) | def __init__(self, shift_ratio=0.5, max_shift_px=32, filter_thr_px=1):
    method __call__ (line 513) | def __call__(self, results):
    method __repr__ (line 573) | def __repr__(self):
  class Pad (line 580) | class Pad:
    method __init__ (line 596) | def __init__(self,
    method _pad_img (line 622) | def _pad_img(self, results):
    method _pad_masks (line 640) | def _pad_masks(self, results):
    method _pad_seg (line 647) | def _pad_seg(self, results):
    method __call__ (line 655) | def __call__(self, results):
    method __repr__ (line 669) | def __repr__(self):
  class Normalize (line 679) | class Normalize:
    method __init__ (line 691) | def __init__(self, mean, std, to_rgb=True):
    method __call__ (line 696) | def __call__(self, results):
    method __repr__ (line 713) | def __repr__(self):
  class RandomCrop (line 720) | class RandomCrop:
    method __init__ (line 756) | def __init__(self,
    method _crop_data (line 787) | def _crop_data(self, results, crop_size, allow_negative_crop):
    method _get_crop_size (line 854) | def _get_crop_size(self, image_size):
    method __call__ (line 884) | def __call__(self, results):
    method __repr__ (line 900) | def __repr__(self):
  class SegRescale (line 910) | class SegRescale:
    method __init__ (line 920) | def __init__(self, scale_factor=1, backend='cv2'):
    method __call__ (line 924) | def __call__(self, results):
    method __repr__ (line 943) | def __repr__(self):
  class PhotoMetricDistortion (line 948) | class PhotoMetricDistortion:
    method __init__ (line 969) | def __init__(self,
    method __call__ (line 979) | def __call__(self, results):
    method __repr__ (line 1040) | def __repr__(self):
  class Expand (line 1052) | class Expand:
    method __init__ (line 1065) | def __init__(self,
    method __call__ (line 1081) | def __call__(self, results):
    method __repr__ (line 1135) | def __repr__(self):
  class MinIoURandomCrop (line 1144) | class MinIoURandomCrop:
    method __init__ (line 1163) | def __init__(self,
    method __call__ (line 1181) | def __call__(self, results):
    method __repr__ (line 1275) | def __repr__(self):
  class Corrupt (line 1284) | class Corrupt:
    method __init__ (line 1295) | def __init__(self, corruption, severity=1):
    method __call__ (line 1299) | def __call__(self, results):
    method __repr__ (line 1320) | def __repr__(self):
  class Albu (line 1328) | class Albu:
    method __init__ (line 1370) | def __init__(self,
    method albu_builder (line 1413) | def albu_builder(self, cfg):
    method mapper (line 1448) | def mapper(d, keymap):
    method __call__ (line 1464) | def __call__(self, results):
    method __repr__ (line 1526) | def __repr__(self):
  class RandomCenterCropPad (line 1532) | class RandomCenterCropPad:
    method __init__ (line 1620) | def __init__(self,
    method _get_border (line 1666) | def _get_border(self, border, size):
    method _filter_boxes (line 1685) | def _filter_boxes(self, patch, boxes):
    method _crop_image_and_paste (line 1701) | def _crop_image_and_paste(self, image, center, size):
    method _train_aug (line 1752) | def _train_aug(self, results):
    method _test_aug (line 1820) | def _test_aug(self, results):
    method __call__ (line 1854) | def __call__(self, results):
    method __repr__ (line 1866) | def __repr__(self):
  class CutOut (line 1881) | class CutOut:
    method __init__ (line 1904) | def __init__(self,
    method __call__ (line 1925) | def __call__(self, results):
    method __repr__ (line 1945) | def __repr__(self):
  class Mosaic (line 1955) | class Mosaic:
    method __init__ (line 2008) | def __init__(self,
    method __call__ (line 2029) | def __call__(self, results):
    method get_indexes (line 2045) | def get_indexes(self, dataset):
    method _mosaic_transform (line 2058) | def _mosaic_transform(self, results):
    method _mosaic_combine (line 2155) | def _mosaic_combine(self, loc, center_position_xy, img_shape_wh):
    method _filter_box_candidates (line 2216) | def _filter_box_candidates(self, bboxes, labels):
    method __repr__ (line 2225) | def __repr__(self):
  class MixUp (line 2236) | class MixUp:
    method __init__ (line 2292) | def __init__(self,
    method __call__ (line 2316) | def __call__(self, results):
    method get_indexes (line 2329) | def get_indexes(self, dataset):
    method _mixup_transform (line 2347) | def _mixup_transform(self, results):
    method _filter_box_candidates (line 2470) | def _filter_box_candidates(self, bbox1, bbox2):
    method __repr__ (line 2485) | def __repr__(self):
  class RandomAffine (line 2500) | class RandomAffine:
    method __init__ (line 2539) | def __init__(self,
    method __call__ (line 2566) | def __call__(self, results):
    method filter_gt_bboxes (line 2649) | def filter_gt_bboxes(self, origin_bboxes, wrapped_bboxes):
    method __repr__ (line 2664) | def __repr__(self):
    method _get_rotation_matrix (line 2679) | def _get_rotation_matrix(rotate_degrees):
    method _get_scaling_matrix (line 2688) | def _get_scaling_matrix(scale_ratio):
    method _get_share_matrix (line 2695) | def _get_share_matrix(scale_ratio):
    method _get_shear_matrix (line 2702) | def _get_shear_matrix(x_shear_degrees, y_shear_degrees):
    method _get_translation_matrix (line 2711) | def _get_translation_matrix(x, y):
  class YOLOXHSVRandomAug (line 2718) | class YOLOXHSVRandomAug:
    method __init__ (line 2729) | def __init__(self, hue_delta=5, saturation_delta=30, value_delta=30):
    method __call__ (line 2734) | def __call__(self, results):
    method __repr__ (line 2753) | def __repr__(self):
  class CopyPaste (line 2762) | class CopyPaste:
    method __init__ (line 2794) | def __init__(
    method get_indexes (line 2807) | def get_indexes(self, dataset):
    method gen_masks_from_bboxes (line 2817) | def gen_masks_from_bboxes(self, bboxes, img_shape):
    method get_gt_masks (line 2837) | def get_gt_masks(self, results):
    method __call__ (line 2853) | def __call__(self, results):
    method _select_object (line 2879) | def _select_object(self, results):
    method _copy_paste (line 2898) | def _copy_paste(self, dst_results, src_results):
    method get_updated_masks (line 2956) | def get_updated_masks(self, masks, composed_mask):
    method __repr__ (line 2962) | def __repr__(self):

FILE: mmdet/datasets/samplers/class_aware_sampler.py
  class ClassAwareSampler (line 11) | class ClassAwareSampler(Sampler):
    method __init__ (line 40) | def __init__(self,
    method __iter__ (line 85) | def __iter__(self):
    method __len__ (line 131) | def __len__(self):
    method set_epoch (line 134) | def set_epoch(self, epoch):
  class RandomCycleIter (line 138) | class RandomCycleIter:
    method __init__ (line 156) | def __init__(self, data, generator=None):
    method __iter__ (line 163) | def __iter__(self):
    method __len__ (line 166) | def __len__(self):
    method __next__ (line 169) | def __next__(self):

FILE: mmdet/datasets/samplers/distributed_sampler.py
  class DistributedSampler (line 11) | class DistributedSampler(_DistributedSampler):
    method __init__ (line 13) | def __init__(self,
    method __iter__ (line 31) | def __iter__(self):

FILE: mmdet/datasets/samplers/group_sampler.py
  class GroupSampler (line 10) | class GroupSampler(Sampler):
    method __init__ (line 12) | def __init__(self, dataset, samples_per_gpu=1):
    method __iter__ (line 23) | def __iter__(self):
    method __len__ (line 47) | def __len__(self):
  class DistributedGroupSampler (line 51) | class DistributedGroupSampler(Sampler):
    method __init__ (line 72) | def __init__(self,
    method __iter__ (line 101) | def __iter__(self):
    method __len__ (line 144) | def __len__(self):
    method set_epoch (line 147) | def set_epoch(self, epoch):

FILE: mmdet/datasets/samplers/infinite_sampler.py
  class InfiniteGroupBatchSampler (line 12) | class InfiniteGroupBatchSampler(Sampler):
    method __init__ (line 37) | def __init__(self,
    method _infinite_indices (line 71) | def _infinite_indices(self):
    method _indices_of_rank (line 82) | def _indices_of_rank(self):
    method __iter__ (line 87) | def __iter__(self):
    method __len__ (line 97) | def __len__(self):
    method set_epoch (line 101) | def set_epoch(self, epoch):
  class InfiniteBatchSampler (line 106) | class InfiniteBatchSampler(Sampler):
    method __init__ (line 128) | def __init__(self,
    method _infinite_indices (line 155) | def _infinite_indices(self):
    method _indices_of_rank (line 166) | def _indices_of_rank(self):
    method __iter__ (line 171) | def __iter__(self):
    method __len__ (line 180) | def __len__(self):
    method set_epoch (line 184) | def set_epoch(self, epoch):

FILE: mmdet/datasets/utils.py
  function replace_ImageToTensor (line 15) | def replace_ImageToTensor(pipelines):
  function get_loading_pipeline (line 75) | def get_loading_pipeline(pipeline):
  class NumClassCheckHook (line 118) | class NumClassCheckHook(Hook):
    method _check_head (line 120) | def _check_head(self, runner):
    method before_train_epoch (line 152) | def before_train_epoch(self, runner):
    method before_val_epoch (line 160) | def before_val_epoch(self, runner):

FILE: mmdet/datasets/voc.py
  class VOCDataset (line 12) | class VOCDataset(XMLDataset):
    method __init__ (line 25) | def __init__(self, **kwargs):
    method evaluate (line 34) | def evaluate(self,

FILE: mmdet/datasets/wider_face.py
  class WIDERFaceDataset (line 12) | class WIDERFaceDataset(XMLDataset):
    method __init__ (line 22) | def __init__(self, **kwargs):
    method load_annotations (line 25) | def load_annotations(self, ann_file):

FILE: mmdet/datasets/xml_style.py
  class XMLDataset (line 14) | class XMLDataset(CustomDataset):
    method __init__ (line 25) | def __init__(self,
    method load_annotations (line 38) | def load_annotations(self, ann_file):
    method _filter_imgs (line 69) | def _filter_imgs(self, min_size=32):
    method get_ann_info (line 90) | def get_ann_info(self, idx):
    method get_cat_ids (line 156) | def get_cat_ids(self, idx):

FILE: mmdet/models/backbones/csp_darknet.py
  class Focus (line 14) | class Focus(nn.Module):
    method __init__ (line 30) | def __init__(self,
    method forward (line 49) | def forward(self, x):
  class SPPBottleneck (line 67) | class SPPBottleneck(BaseModule):
    method __init__ (line 85) | def __init__(self,
    method forward (line 116) | def forward(self, x):
  class CSPDarknet (line 124) | class CSPDarknet(BaseModule):
    method __init__ (line 177) | def __init__(self,
    method _freeze_stages (line 261) | def _freeze_stages(self):
    method train (line 269) | def train(self, mode=True):
    method forward (line 277) | def forward(self, x):

FILE: mmdet/models/backbones/darknet.py
  class ResBlock (line 14) | class ResBlock(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method forward (line 50) | def forward(self, x):
  class Darknet (line 60) | class Darknet(BaseModule):
    method __init__ (line 101) | def __init__(self,
    method forward (line 153) | def forward(self, x):
    method _freeze_stages (line 163) | def _freeze_stages(self):
    method train (line 171) | def train(self, mode=True):
    method make_conv_res_block (line 180) | def make_conv_res_block(in_channels,

FILE: mmdet/models/backbones/detectors_resnet.py
  class Bottleneck (line 16) | class Bottleneck(_Bottleneck):
    method __init__ (line 36) | def __init__(self,
    method rfp_forward (line 73) | def rfp_forward(self, x, rfp_feat):
  class ResLayer (line 120) | class ResLayer(Sequential):
    method __init__ (line 146) | def __init__(self,
  class DetectoRS_ResNet (line 212) | class DetectoRS_ResNet(ResNet):
    method __init__ (line 234) | def __init__(self,
    method init_weights (line 296) | def init_weights(self):
    method make_res_layer (line 325) | def make_res_layer(self, **kwargs):
    method forward (line 329) | def forward(self, x):
    method rfp_forward (line 336) | def rfp_forward(self, x, rfp_feats):

FILE: mmdet/models/backbones/detectors_resnext.py
  class Bottleneck (line 11) | class Bottleneck(_Bottleneck):
    method __init__ (line 14) | def __init__(self,
  class DetectoRS_ResNeXt (line 99) | class DetectoRS_ResNeXt(DetectoRS_ResNet):
    method __init__ (line 113) | def __init__(self, groups=1, base_width=4, **kwargs):
    method make_res_layer (line 118) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/efficientnet.py
  class EdgeResidual (line 16) | class EdgeResidual(BaseModule):
    method __init__ (line 41) | def __init__(self,
    method forward (line 91) | def forward(self, x):
  function model_scaling (line 115) | def model_scaling(layer_setting, arch_setting):
  class EfficientNet (line 159) | class EfficientNet(BaseModule):
    method __init__ (line 254) | def __init__(self,
    method make_layer (line 327) | def make_layer(self):
    method forward (line 395) | def forward(self, x):
    method _freeze_stages (line 404) | def _freeze_stages(self):
    method train (line 411) | def train(self, mode=True):

FILE: mmdet/models/backbones/hourglass.py
  class HourglassModule (line 12) | class HourglassModule(BaseModule):
    method __init__ (line 30) | def __init__(self,
    method forward (line 80) | def forward(self, x):
  class HourglassNet (line 97) | class HourglassNet(BaseModule):
    method __init__ (line 131) | def __init__(self,
    method init_weights (line 195) | def init_weights(self):
    method forward (line 203) | def forward(self, x):

FILE: mmdet/models/backbones/hrnet.py
  class HRModule (line 13) | class HRModule(BaseModule):
    method __init__ (line 20) | def __init__(self,
    method _check_branches (line 49) | def _check_branches(self, num_branches, num_blocks, in_channels,
    method _make_one_branch (line 66) | def _make_one_branch(self,
    method _make_branches (line 112) | def _make_branches(self, num_branches, block, num_blocks, num_channels):
    method _make_fuse_layers (line 121) | def _make_fuse_layers(self):
    method forward (line 183) | def forward(self, x):
  class HRNet (line 204) | class HRNet(BaseModule):
    method __init__ (line 281) | def __init__(self,
    method norm1 (line 403) | def norm1(self):
    method norm2 (line 408) | def norm2(self):
    method _make_transition_layer (line 412) | def _make_transition_layer(self, num_channels_pre_layer,
    method _make_layer (line 458) | def _make_layer(self, block, inplanes, planes, blocks, stride=1):
    method _make_stage (line 505) | def _make_stage(self, layer_config, in_channels, multiscale_output=True):
    method forward (line 545) | def forward(self, x):
    method train (line 581) | def train(self, mode=True):

FILE: mmdet/models/backbones/mobilenet_v2.py
  class MobileNetV2 (line 14) | class MobileNetV2(BaseModule):
    method __init__ (line 46) | def __init__(self,
    method make_layer (line 138) | def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
    method _freeze_stages (line 167) | def _freeze_stages(self):
    method forward (line 177) | def forward(self, x):
    method train (line 188) | def train(self, mode=True):

FILE: mmdet/models/backbones/pvt.py
  class MixFFN (line 23) | class MixFFN(BaseModule):
    method __init__ (line 47) | def __init__(self,
    method forward (line 93) | def forward(self, x, hw_shape, identity=None):
  class SpatialReductionAttention (line 102) | class SpatialReductionAttention(MultiheadAttention):
    method __init__ (line 129) | def __init__(self,
    method forward (line 169) | def forward(self, x, hw_shape, identity=None):
    method legacy_forward (line 200) | def legacy_forward(self, x, hw_shape, identity=None):
  class PVTEncoderLayer (line 219) | class PVTEncoderLayer(BaseModule):
    method __init__ (line 245) | def __init__(self,
    method forward (line 284) | def forward(self, x, hw_shape):
  class AbsolutePositionEmbedding (line 291) | class AbsolutePositionEmbedding(BaseModule):
    method __init__ (line 301) | def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):
    method init_weights (line 319) | def init_weights(self):
    method resize_pos_embed (line 322) | def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):
    method forward (line 351) | def forward(self, x, hw_shape, mode='bilinear'):
  class PyramidVisionTransformer (line 357) | class PyramidVisionTransformer(BaseModule):
    method __init__ (line 410) | def __init__(self,
    method init_weights (line 523) | def init_weights(self):
    method forward (line 563) | def forward(self, x):
  class PyramidVisionTransformerV2 (line 580) | class PyramidVisionTransformerV2(PyramidVisionTransformer):
    method __init__ (line 584) | def __init__(self, **kwargs):

FILE: mmdet/models/backbones/regnet.py
  class RegNet (line 14) | class RegNet(ResNet):
    method __init__ (line 90) | def __init__(self,
    method _make_stem_layer (line 238) | def _make_stem_layer(self, in_channels, base_channels):
    method generate_regnet (line 252) | def generate_regnet(self,
    method quantize_float (line 285) | def quantize_float(number, divisor):
    method adjust_width_group (line 297) | def adjust_width_group(self, widths, bottleneck_ratio, groups):
    method get_stages_from_blocks (line 322) | def get_stages_from_blocks(self, widths):
    method forward (line 344) | def forward(self, x):

FILE: mmdet/models/backbones/res2net.py
  class Bottle2neck (line 15) | class Bottle2neck(_Bottleneck):
    method __init__ (line 18) | def __init__(self,
    method forward (line 106) | def forward(self, x):
  class Res2Layer (line 163) | class Res2Layer(Sequential):
    method __init__ (line 182) | def __init__(self,
  class Res2Net (line 243) | class Res2Net(ResNet):
    method __init__ (line 303) | def __init__(self,
    method make_res_layer (line 322) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/resnest.py
  class RSoftmax (line 17) | class RSoftmax(nn.Module):
    method __init__ (line 25) | def __init__(self, radix, groups):
    method forward (line 30) | def forward(self, x):
  class SplitAttentionConv2d (line 41) | class SplitAttentionConv2d(BaseModule):
    method __init__ (line 64) | def __init__(self,
    method norm0 (line 116) | def norm0(self):
    method norm1 (line 121) | def norm1(self):
    method forward (line 125) | def forward(self, x):
  class Bottleneck (line 154) | class Bottleneck(_Bottleneck):
    method __init__ (line 173) | def __init__(self,
    method forward (line 234) | def forward(self, x):
  class ResNeSt (line 278) | class ResNeSt(ResNetV1d):
    method __init__ (line 299) | def __init__(self,
    method make_res_layer (line 313) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/resnet.py
  class BasicBlock (line 14) | class BasicBlock(BaseModule):
    method __init__ (line 17) | def __init__(self,
    method norm1 (line 58) | def norm1(self):
    method norm2 (line 63) | def norm2(self):
    method forward (line 67) | def forward(self, x):
  class Bottleneck (line 97) | class Bottleneck(BaseModule):
    method __init__ (line 100) | def __init__(self,
    method make_block_plugins (line 219) | def make_block_plugins(self, in_channels, plugins):
    method forward_plugin (line 242) | def forward_plugin(self, x, plugin_names):
    method norm1 (line 249) | def norm1(self):
    method norm2 (line 254) | def norm2(self):
    method norm3 (line 259) | def norm3(self):
    method forward (line 263) | def forward(self, x):
  class ResNet (line 306) | class ResNet(BaseModule):
    method __init__ (line 369) | def __init__(self,
    method make_stage_plugins (line 494) | def make_stage_plugins(self, plugins, stage_idx):
    method make_res_layer (line 556) | def make_res_layer(self, **kwargs):
    method norm1 (line 561) | def norm1(self):
    method _make_stem_layer (line 565) | def _make_stem_layer(self, in_channels, stem_channels):
    method _freeze_stages (line 613) | def _freeze_stages(self):
    method forward (line 631) | def forward(self, x):
    method train (line 648) | def train(self, mode=True):
  class ResNetV1d (line 661) | class ResNetV1d(ResNet):
    method __init__ (line 670) | def __init__(self, **kwargs):

FILE: mmdet/models/backbones/resnext.py
  class Bottleneck (line 12) | class Bottleneck(_Bottleneck):
    method __init__ (line 15) | def __init__(self,
    method _del_block_plugins (line 98) | def _del_block_plugins(self, plugin_names):
  class ResNeXt (line 110) | class ResNeXt(ResNet):
    method __init__ (line 143) | def __init__(self, groups=1, base_width=4, **kwargs):
    method make_res_layer (line 148) | def make_res_layer(self, **kwargs):

FILE: mmdet/models/backbones/ssd_vgg.py
  class SSDVGG (line 13) | class SSDVGG(VGG, BaseModule):
    method __init__ (line 50) | def __init__(self,
    method init_weights (line 105) | def init_weights(self, pretrained=None):
    method forward (line 108) | def forward(self, x):
  class L2Norm (line 122) | class L2Norm(ssd_neck.L2Norm):
    method __init__ (line 124) | def __init__(self, **kwargs):

FILE: mmdet/models/backbones/swin.py
  class WindowMSA (line 22) | class WindowMSA(BaseModule):
    method __init__ (line 41) | def __init__(self,
    method init_weights (line 78) | def init_weights(self):
    method forward (line 81) | def forward(self, x, mask=None):
    method double_step_seq (line 122) | def double_step_seq(step1, len1, step2, len2):
  class ShiftWindowMSA (line 128) | class ShiftWindowMSA(BaseModule):
    method __init__ (line 151) | def __init__(self,
    method forward (line 180) | def forward(self, query, hw_shape):
    method window_reverse (line 256) | def window_reverse(self, windows, H, W):
    method window_partition (line 272) | def window_partition(self, x):
  class SwinBlock (line 288) | class SwinBlock(BaseModule):
    method __init__ (line 313) | def __init__(self,
    method forward (line 358) | def forward(self, x, hw_shape):
  class SwinBlockSequence (line 381) | class SwinBlockSequence(BaseModule):
    method __init__ (line 410) | def __init__(self,
    method forward (line 455) | def forward(self, x, hw_shape):
  class SwinTransformer (line 467) | class SwinTransformer(BaseModule):
    method __init__ (line 524) | def __init__(self,
    method train (line 642) | def train(self, mode=True):
    method _freeze_stages (line 647) | def _freeze_stages(self):
    method init_weights (line 669) | def init_weights(self):
    method forward (line 744) | def forward(self, x):

FILE: mmdet/models/backbones/trident_resnet.py
  class TridentConv (line 14) | class TridentConv(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method extra_repr (line 61) | def extra_repr(self):
    method forward (line 73) | def forward(self, inputs):
  class TridentBottleneck (line 93) | class TridentBottleneck(Bottleneck):
    method __init__ (line 106) | def __init__(self, trident_dilations, test_branch_idx, concat_output,
    method forward (line 128) | def forward(self, x):
  function make_trident_res_layer (line 182) | def make_trident_res_layer(block,
  class TridentResNet (line 235) | class TridentResNet(ResNet):
    method __init__ (line 256) | def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,

FILE: mmdet/models/builder.py
  function build_backbone (line 18) | def build_backbone(cfg):
  function build_neck (line 23) | def build_neck(cfg):
  function build_roi_extractor (line 28) | def build_roi_extractor(cfg):
  function build_shared_head (line 33) | def build_shared_head(cfg):
  function build_head (line 38) | def build_head(cfg):
  function build_loss (line 43) | def build_loss(cfg):
  function build_detector (line 48) | def build_detector(cfg, train_cfg=None, test_cfg=None):

FILE: mmdet/models/dense_heads/anchor_free_head.py
  class AnchorFreeHead (line 18) | class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 46) | def __init__(self,
    method _init_layers (line 107) | def _init_layers(self):
    method _init_cls_convs (line 113) | def _init_cls_convs(self):
    method _init_reg_convs (line 133) | def _init_reg_convs(self):
    method _init_predictor (line 153) | def _init_predictor(self):
    method _load_from_state_dict (line 159) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method forward (line 197) | def forward(self, feats):
    method forward_single (line 215) | def forward_single(self, x):
    method loss (line 240) | def loss(self,
    method get_targets (line 268) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
    method _get_points_single (line 282) | def _get_points_single(self,
    method get_points (line 310) | def get_points(self, featmap_sizes, dtype, device, flatten=False):
    method aug_test (line 334) | def aug_test(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/anchor_head.py
  class AnchorHead (line 17) | class AnchorHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 39) | def __init__(self,
    method num_anchors (line 115) | def num_anchors(self):
    method anchor_generator (line 122) | def anchor_generator(self):
    method _init_layers (line 127) | def _init_layers(self):
    method forward_single (line 135) | def forward_single(self, x):
    method forward (line 152) | def forward(self, feats):
    method get_anchors (line 171) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method _get_targets_single (line 201) | def _get_targets_single(self,
    method get_targets (line 299) | def get_targets(self,
    method loss_single (line 402) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
    method loss (line 453) | def loss(self,
    method aug_test (line 522) | def aug_test(self, feats, img_metas, rescale=False):

FILE: mmdet/models/dense_heads/ascend_anchor_head.py
  class AscendAnchorHead (line 13) | class AscendAnchorHead(AnchorHead):
    method __init__ (line 35) | def __init__(self,
    method get_batch_gt_bboxes (line 72) | def get_batch_gt_bboxes(self, gt_bboxes_list, num_images, gt_nums, dev...
    method get_batch_gt_bboxes_ignore (line 109) | def get_batch_gt_bboxes_ignore(self, gt_bboxes_ignore_list, num_images,
    method get_batch_gt_labels (line 130) | def get_batch_gt_labels(self, gt_labels_list, num_images, gt_nums, dev...
    method _get_targets_concat (line 153) | def _get_targets_concat(self,
    method get_targets (line 255) | def get_targets(self,

FILE: mmdet/models/dense_heads/ascend_retina_head.py
  class AscendRetinaHead (line 8) | class AscendRetinaHead(RetinaHead, AscendAnchorHead):
    method __init__ (line 27) | def __init__(self,
    method get_targets (line 59) | def get_targets(self,

FILE: mmdet/models/dense_heads/ascend_ssd_head.py
  class AscendSSDHead (line 13) | class AscendSSDHead(SSDHead, AscendAnchorHead):
    method __init__ (line 44) | def __init__(self,
    method get_static_anchors (line 92) | def get_static_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method get_targets (line 113) | def get_targets(self,
    method batch_loss (line 180) | def batch_loss(self, batch_cls_score, batch_bbox_pred, batch_anchor,
    method loss (line 261) | def loss(self,

FILE: mmdet/models/dense_heads/atss_head.py
  class ATSSHead (line 14) | class ATSSHead(AnchorHead):
    method __init__ (line 24) | def __init__(self,
    method _init_layers (line 65) | def _init_layers(self):
    method forward (line 109) | def forward(self, feats):
    method forward_single (line 127) | def forward_single(self, x, scale):
    method loss_single (line 156) | def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
    method loss (line 230) | def loss(self,
    method centerness_target (line 305) | def centerness_target(self, anchors, gts):
    method get_targets (line 322) | def get_targets(self,
    method _get_target_single (line 386) | def _get_target_single(self,
    method get_num_level_anchors_inside (line 496) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):

FILE: mmdet/models/dense_heads/autoassign_head.py
  class CenterPrior (line 21) | class CenterPrior(nn.Module):
    method __init__ (line 37) | def __init__(self,
    method forward (line 49) | def forward(self, anchor_points_list, gt_bboxes, labels,
  class AutoAssignHead (line 128) | class AutoAssignHead(FCOSHead):
    method __init__ (line 148) | def __init__(self,
    method init_weights (line 167) | def init_weights(self):
    method forward_single (line 179) | def forward_single(self, x, scale, stride):
    method get_pos_loss_single (line 207) | def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels,
    method get_neg_loss_single (line 251) | def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious,
    method loss (line 307) | def loss(self,
    method get_targets (line 439) | def get_targets(self, points, gt_bboxes_list):
    method _get_target_single (line 468) | def _get_target_single(self, gt_bboxes, points):
    method _get_points_single (line 506) | def _get_points_single(self,

FILE: mmdet/models/dense_heads/base_dense_head.py
  class BaseDenseHead (line 12) | class BaseDenseHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 15) | def __init__(self, init_cfg=None):
    method init_weights (line 18) | def init_weights(self):
    method loss (line 27) | def loss(self, **kwargs):
    method get_bboxes (line 32) | def get_bboxes(self,
    method _get_bboxes_single (line 109) | def _get_bboxes_single(self,
    method _bbox_post_process (line 226) | def _bbox_post_process(self,
    method forward_train (line 303) | def forward_train(self,
    method simple_test (line 343) | def simple_test(self, feats, img_metas, rescale=False):
    method onnx_export (line 363) | def onnx_export(self,

FILE: mmdet/models/dense_heads/base_mask_head.py
  class BaseMaskHead (line 7) | class BaseMaskHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 10) | def __init__(self, init_cfg):
    method loss (line 14) | def loss(self, **kwargs):
    method get_results (line 18) | def get_results(self, **kwargs):
    method forward_train (line 22) | def forward_train(self,
    method simple_test (line 73) | def simple_test(self,
    method onnx_export (line 114) | def onnx_export(self, img, img_metas):

FILE: mmdet/models/dense_heads/cascade_rpn_head.py
  class AdaptiveConv (line 20) | class AdaptiveConv(BaseModule):
    method __init__ (line 42) | def __init__(self,
    method forward (line 79) | def forward(self, x, offset):
  class StageCascadeRPNHead (line 96) | class StageCascadeRPNHead(RPNHead):
    method __init__ (line 112) | def __init__(self,
    method _init_layers (line 153) | def _init_layers(self):
    method forward_single (line 164) | def forward_single(self, x, offset):
    method forward (line 174) | def forward(self, feats, offset_list=None):
    method _region_targets_single (line 180) | def _region_targets_single(self,
    method region_targets (line 236) | def region_targets(self,
    method get_targets (line 286) | def get_targets(self,
    method anchor_offset (line 328) | def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
    method loss_single (line 406) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
    method loss (line 436) | def loss(self,
    method get_bboxes (line 505) | def get_bboxes(self,
    method _get_bboxes_single (line 547) | def _get_bboxes_single(self,
    method refine_bboxes (line 672) | def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
  class CascadeRPNHead (line 690) | class CascadeRPNHead(BaseDenseHead):
    method __init__ (line 705) | def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=N...
    method loss (line 720) | def loss(self):
    method get_bboxes (line 724) | def get_bboxes(self):
    method forward_train (line 728) | def forward_train(self,
    method simple_test_rpn (line 773) | def simple_test_rpn(self, x, img_metas):
    method aug_test_rpn (line 798) | def aug_test_rpn(self, x, img_metas):

FILE: mmdet/models/dense_heads/centernet_head.py
  class CenterNetHead (line 18) | class CenterNetHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 38) | def __init__(self,
    method _build_head (line 64) | def _build_head(self, in_channel, feat_channel, out_channel):
    method init_weights (line 72) | def init_weights(self):
    method forward (line 81) | def forward(self, feats):
    method forward_single (line 98) | def forward_single(self, feat):
    method loss (line 116) | def loss(self,
    method get_targets (line 181) | def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape):
    method get_bboxes (line 252) | def get_bboxes(self,
    method _get_bboxes_single (line 297) | def _get_bboxes_single(self,
    method decode_heatmap (line 351) | def decode_heatmap(self,
    method _bboxes_nms (line 402) | def _bboxes_nms(self, bboxes, labels, cfg):

FILE: mmdet/models/dense_heads/centripetal_head.py
  class CentripetalHead (line 13) | class CentripetalHead(CornerHead):
    method __init__ (line 48) | def __init__(self,
    method _init_centripetal_layers (line 73) | def _init_centripetal_layers(self):
    method _init_layers (line 133) | def _init_layers(self):
    method init_weights (line 141) | def init_weights(self):
    method forward_single (line 157) | def forward_single(self, x, lvl_ind):
    method loss (line 208) | def loss(self,
    method loss_single (line 284) | def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
    method get_bboxes (line 367) | def get_bboxes(self,

FILE: mmdet/models/dense_heads/corner_head.py
  class BiCornerPool (line 21) | class BiCornerPool(BaseModule):
    method __init__ (line 34) | def __init__(self,
    method forward (line 64) | def forward(self, x):
  class CornerHead (line 85) | class CornerHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 118) | def __init__(self,
    method _make_layers (line 158) | def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
    method _init_corner_kpt_layers (line 165) | def _init_corner_kpt_layers(self):
    method _init_corner_emb_layers (line 203) | def _init_corner_emb_layers(self):
    method _init_layers (line 221) | def _init_layers(self):
    method init_weights (line 230) | def init_weights(self):
    method forward (line 248) | def forward(self, feats):
    method forward_single (line 280) | def forward_single(self, x, lvl_ind, return_pool=False):
    method get_targets (line 325) | def get_targets(self,
    method loss (line 514) | def loss(self,
    method loss_single (line 576) | def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
    method get_bboxes (line 655) | def get_bboxes(self,
    method _get_bboxes_single (line 704) | def _get_bboxes_single(self,
    method _bboxes_nms (line 777) | def _bboxes_nms(self, bboxes, labels, cfg):
    method decode_heatmap (line 795) | def decode_heatmap(self,
    method onnx_export (line 1031) | def onnx_export(self,

FILE: mmdet/models/dense_heads/ddod_head.py
  class DDODHead (line 17) | class DDODHead(AnchorHead):
    method __init__ (line 37) | def __init__(self,
    method _init_layers (line 63) | def _init_layers(self):
    method init_weights (line 110) | def init_weights(self):
    method forward (line 121) | def forward(self, feats):
    method forward_single (line 142) | def forward_single(self, x, scale):
    method loss_cls_single (line 171) | def loss_cls_single(self, cls_score, labels, label_weights,
    method loss_reg_single (line 198) | def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels,
    method calc_reweight_factor (line 275) | def calc_reweight_factor(self, labels_list):
    method loss (line 295) | def loss(self,
    method process_predictions_and_anchors (line 413) | def process_predictions_and_anchors(self, anchor_list, valid_flag_list,
    method get_cls_targets (line 483) | def get_cls_targets(self,
    method get_reg_targets (line 561) | def get_reg_targets(self,
    method _get_target_single (line 634) | def _get_target_single(self,
    method get_num_level_anchors_inside (line 761) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):

FILE: mmdet/models/dense_heads/deformable_detr_head.py
  class DeformableDETRHead (line 17) | class DeformableDETRHead(DETRHead):
    method __init__ (line 36) | def __init__(self,
    method _init_layers (line 50) | def _init_layers(self):
    method init_weights (line 83) | def init_weights(self):
    method forward (line 97) | def forward(self, mlvl_feats, img_metas):
    method loss (line 184) | def loss(self,
    method get_bboxes (line 269) | def get_bboxes(self,

FILE: mmdet/models/dense_heads/dense_test_mixins.py
  class BBoxTestMixin (line 14) | class BBoxTestMixin(object):
    method simple_test_bboxes (line 17) | def simple_test_bboxes(self, feats, img_metas, rescale=False):
    method aug_test_bboxes (line 41) | def aug_test_bboxes(self, feats, img_metas, rescale=False):
    method simple_test_rpn (line 116) | def simple_test_rpn(self, x, img_metas):
    method aug_test_rpn (line 133) | def aug_test_rpn(self, feats, img_metas):
    method async_simple_test_rpn (line 169) | async def async_simple_test_rpn(self, x, img_metas):
    method merge_aug_bboxes (line 179) | def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):

FILE: mmdet/models/dense_heads/detr_head.py
  class DETRHead (line 18) | class DETRHead(AnchorFreeHead):
    method __init__ (line 52) | def __init__(self,
    method _init_layers (line 152) | def _init_layers(self):
    method init_weights (line 167) | def init_weights(self):
    method _load_from_state_dict (line 172) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method forward (line 202) | def forward(self, feats, img_metas):
    method forward_single (line 226) | def forward_single(self, x, img_metas):
    method loss (line 268) | def loss(self,
    method loss_single (line 334) | def loss_single(self,
    method get_targets (line 419) | def get_targets(self,
    method _get_target_single (line 476) | def _get_target_single(self,
    method forward_train (line 547) | def forward_train(self,
    method get_bboxes (line 583) | def get_bboxes(self,
    method _get_bboxes_single (line 628) | def _get_bboxes_single(self,
    method simple_test_bboxes (line 685) | def simple_test_bboxes(self, feats, img_metas, rescale=False):
    method forward_onnx (line 707) | def forward_onnx(self, feats, img_metas):
    method forward_single_onnx (line 735) | def forward_single_onnx(self, x, img_metas):
    method onnx_export (line 775) | def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_me...

FILE: mmdet/models/dense_heads/embedding_rpn_head.py
  class EmbeddingRPNHead (line 11) | class EmbeddingRPNHead(BaseModule):
    method __init__ (line 26) | def __init__(self,
    method _init_layers (line 38) | def _init_layers(self):
    method init_weights (line 44) | def init_weights(self):
    method _decode_init_proposals (line 54) | def _decode_init_proposals(self, imgs, img_metas):
    method forward_dummy (line 95) | def forward_dummy(self, img, img_metas):
    method forward_train (line 102) | def forward_train(self, img, img_metas):
    method simple_test_rpn (line 106) | def simple_test_rpn(self, img, img_metas):
    method simple_test (line 110) | def simple_test(self, img, img_metas):
    method aug_test_rpn (line 114) | def aug_test_rpn(self, feats, img_metas):

FILE: mmdet/models/dense_heads/fcos_head.py
  class FCOSHead (line 17) | class FCOSHead(AnchorFreeHead):
    method __init__ (line 60) | def __init__(self,
    method _init_layers (line 106) | def _init_layers(self):
    method forward (line 112) | def forward(self, feats):
    method forward_single (line 133) | def forward_single(self, x, scale, stride):
    method loss (line 168) | def loss(self,
    method get_targets (line 270) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
    method _get_target_single (line 331) | def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ran...
    method centerness_target (line 415) | def centerness_target(self, pos_bbox_targets):
    method _get_points_single (line 436) | def _get_points_single(self,

FILE: mmdet/models/dense_heads/fovea_head.py
  class FeatureAlign (line 18) | class FeatureAlign(BaseModule):
    method __init__ (line 20) | def __init__(self,
    method forward (line 43) | def forward(self, x, shape):
  class FoveaHead (line 50) | class FoveaHead(AnchorFreeHead):
    method __init__ (line 55) | def __init__(self,
    method _init_layers (line 81) | def _init_layers(self):
    method forward_single (line 121) | def forward_single(self, x):
    method loss (line 134) | def loss(self,
    method get_targets (line 186) | def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, poin...
    method _get_target_single (line 208) | def _get_target_single(self,
    method _get_bboxes_single (line 272) | def _get_bboxes_single(self,
    method _bbox_decode (line 358) | def _bbox_decode(self, priors, bbox_pred, base_len, max_shape):
    method _get_points_single (line 374) | def _get_points_single(self, *args, **kwargs):

FILE: mmdet/models/dense_heads/free_anchor_retina_head.py
  class FreeAnchorRetinaHead (line 13) | class FreeAnchorRetinaHead(RetinaHead):
    method __init__ (line 34) | def __init__(self,
    method loss (line 54) | def loss(self,
    method positive_bag_loss (line 221) | def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
    method negative_bag_loss (line 248) | def negative_bag_loss(self, cls_prob, box_prob):

FILE: mmdet/models/dense_heads/fsaf_head.py
  class FSAFHead (line 15) | class FSAFHead(RetinaHead):
    method __init__ (line 43) | def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs):
    method forward_single (line 63) | def forward_single(self, x):
    method _get_targets_single (line 80) | def _get_targets_single(self,
    method loss (line 188) | def loss(self,
    method calculate_pos_recall (line 317) | def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
    method collect_loss_level_single (line 351) | def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
    method reweight_loss_single (line 382) | def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,

FILE: mmdet/models/dense_heads/ga_retina_head.py
  class GARetinaHead (line 11) | class GARetinaHead(GuidedAnchorHead):
    method __init__ (line 14) | def __init__(self,
    method _init_layers (line 45) | def _init_layers(self):
    method forward_single (line 92) | def forward_single(self, x):

FILE: mmdet/models/dense_heads/ga_rpn_head.py
  class GARPNHead (line 16) | class GARPNHead(GuidedAnchorHead):
    method __init__ (line 19) | def __init__(self,
    method _init_layers (line 34) | def _init_layers(self):
    method forward_single (line 40) | def forward_single(self, x):
    method loss (line 49) | def loss(self,
    method _get_bboxes_single (line 72) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/gfl_head.py
  class Integral (line 16) | class Integral(nn.Module):
    method __init__ (line 29) | def __init__(self, reg_max=16):
    method forward (line 35) | def forward(self, x):
  class GFLHead (line 53) | class GFLHead(AnchorHead):
    method __init__ (line 88) | def __init__(self,
    method _init_layers (line 128) | def _init_layers(self):
    method forward (line 161) | def forward(self, feats):
    method forward_single (line 179) | def forward_single(self, x, scale):
    method anchor_center (line 205) | def anchor_center(self, anchors):
    method loss_single (line 218) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
    method loss (line 307) | def loss(self,
    method _get_bboxes_single (line 380) | def _get_bboxes_single(self,
    method get_targets (line 473) | def get_targets(self,
    method _get_target_single (line 537) | def _get_target_single(self,
    method get_num_level_anchors_inside (line 643) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):

FILE: mmdet/models/dense_heads/guided_anchor_head.py
  class FeatureAdaption (line 16) | class FeatureAdaption(BaseModule):
    method __init__ (line 31) | def __init__(self,
    method forward (line 54) | def forward(self, x, shape):
  class GuidedAnchorHead (line 61) | class GuidedAnchorHead(AnchorHead):
    method __init__ (line 97) | def __init__(
    method num_anchors (line 212) | def num_anchors(self):
    method _init_layers (line 217) | def _init_layers(self):
    method forward_single (line 233) | def forward_single(self, x):
    method forward (line 246) | def forward(self, feats):
    method get_sampled_approxs (line 249) | def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
    method get_anchors (line 299) | def get_anchors(self,
    method _get_guided_anchors_single (line 350) | def _get_guided_anchors_single(self,
    method ga_loc_targets (line 384) | def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
    method _ga_shape_target_single (line 486) | def _ga_shape_target_single(self,
    method ga_shape_targets (line 549) | def ga_shape_targets(self,
    method loss_shape_single (line 615) | def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
    method loss_loc_single (line 639) | def loss_loc_single(self, loc_pred, loc_target, loc_weight,
    method loss (line 650) | def loss(self,
    method get_bboxes (line 756) | def get_bboxes(self,
    method _get_bboxes_single (line 800) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/lad_head.py
  class LADHead (line 11) | class LADHead(PAAHead):
    method get_label_assignment (line 16) | def get_label_assignment(self,
    method forward_train (line 121) | def forward_train(self,
    method loss (line 160) | def loss(self,

FILE: mmdet/models/dense_heads/ld_head.py
  class LDHead (line 11) | class LDHead(GFLHead):
    method __init__ (line 26) | def __init__(self,
    method loss_single (line 38) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
    method forward_train (line 142) | def forward_train(self,
    method loss (line 185) | def loss(self,

FILE: mmdet/models/dense_heads/mask2former_head.py
  class Mask2FormerHead (line 21) | class Mask2FormerHead(MaskFormerHead):
    method __init__ (line 58) | def __init__(self,
    method init_weights (line 137) | def init_weights(self):
    method _get_target_single (line 148) | def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
    method loss_single (line 218) | def loss_single(self, cls_scores, mask_preds, gt_labels_list,
    method forward_head (line 309) | def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):
    method forward (line 351) | def forward(self, feats, img_metas):

FILE: mmdet/models/dense_heads/maskformer_head.py
  class MaskFormerHead (line 17) | class MaskFormerHead(AnchorFreeHead):
    method __init__ (line 53) | def __init__(self,
    method init_weights (line 128) | def init_weights(self):
    method preprocess_gt (line 138) | def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs,
    method get_targets (line 174) | def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list,
    method _get_target_single (line 218) | def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
    method loss (line 279) | def loss(self, all_cls_scores, all_mask_preds, gt_labels_list,
    method loss_single (line 323) | def loss_single(self, cls_scores, mask_preds, gt_labels_list,
    method forward (line 415) | def forward(self, feats, img_metas):
    method forward_train (line 480) | def forward_train(self,
    method simple_test (line 527) | def simple_test(self, feats, img_metas, **kwargs):

FILE: mmdet/models/dense_heads/nasfcos_head.py
  class NASFCOSHead (line 12) | class NASFCOSHead(FCOSHead):
    method __init__ (line 20) | def __init__(self, *args, init_cfg=None, **kwargs):
    method _init_layers (line 39) | def _init_layers(self):

FILE: mmdet/models/dense_heads/paa_head.py
  function levels_to_images (line 18) | def levels_to_images(mlvl_tensor):
  class PAAHead (line 46) | class PAAHead(ATSSHead):
    method __init__ (line 74) | def __init__(self,
    method loss (line 87) | def loss(self,
    method get_pos_loss (line 202) | def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_wei...
    method paa_reassign (line 258) | def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
    method gmm_separation_scheme (line 367) | def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
    method get_targets (line 402) | def get_targets(
    method _get_targets_single (line 494) | def _get_targets_single(self,
    method get_bboxes (line 521) | def get_bboxes(self,
    method _get_bboxes_single (line 537) | def _get_bboxes_single(self,
    method _bbox_post_process (line 620) | def _bbox_post_process(self,
    method score_voting (line 693) | def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,

FILE: mmdet/models/dense_heads/pisa_retinanet_head.py
  class PISARetinaHead (line 12) | class PISARetinaHead(RetinaHead):
    method loss (line 23) | def loss(self,

FILE: mmdet/models/dense_heads/pisa_ssd_head.py
  class PISASSDHead (line 12) | class PISASSDHead(SSDHead):
    method loss (line 14) | def loss(self,

FILE: mmdet/models/dense_heads/reppoints_head.py
  class RepPointsHead (line 17) | class RepPointsHead(AnchorFreeHead):
    method __init__ (line 36) | def __init__(self,
    method _init_layers (line 126) | def _init_layers(self):
    method points2bbox (line 170) | def points2bbox(self, pts, y_first=True):
    method gen_grid_from_reg (line 221) | def gen_grid_from_reg(self, reg, previous_boxes):
    method forward (line 255) | def forward(self, feats):
    method forward_single (line 258) | def forward_single(self, x):
    method get_points (line 304) | def get_points(self, featmap_sizes, img_metas, device):
    method centers_to_bboxes (line 332) | def centers_to_bboxes(self, point_list):
    method offset_to_pts (line 350) | def offset_to_pts(self, center_list, pred_list):
    method _point_target_single (line 371) | def _point_target_single(self,
    method get_targets (line 442) | def get_targets(self,
    method loss_single (line 525) | def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
    method loss (line 563) | def loss(self,
    method _get_bboxes_single (line 659) | def _get_bboxes_single(self,
    method _bbox_decode (line 756) | def _bbox_decode(self, points, bbox_pred, stride, max_shape):

FILE: mmdet/models/dense_heads/retina_head.py
  class RetinaHead (line 10) | class RetinaHead(AnchorHead):
    method __init__ (line 29) | def __init__(self,
    method _init_layers (line 61) | def _init_layers(self):
    method forward_single (line 94) | def forward_single(self, x):

FILE: mmdet/models/dense_heads/retina_sepbn_head.py
  class RetinaSepBNHead (line 10) | class RetinaSepBNHead(AnchorHead):
    method __init__ (line 18) | def __init__(self,
    method _init_layers (line 36) | def _init_layers(self):
    method init_weights (line 78) | def init_weights(self):
    method forward (line 89) | def forward(self, feats):

FILE: mmdet/models/dense_heads/rpn_head.py
  class RPNHead (line 15) | class RPNHead(AnchorHead):
    method __init__ (line 24) | def __init__(self,
    method _init_layers (line 33) | def _init_layers(self):
    method forward_single (line 62) | def forward_single(self, x):
    method loss (line 70) | def loss(self,
    method _get_bboxes_single (line 103) | def _get_bboxes_single(self,
    method _bbox_post_process (line 189) | def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anch...
    method onnx_export (line 237) | def onnx_export(self, x, img_metas):

FILE: mmdet/models/dense_heads/sabl_retina_head.py
  class SABLRetinaHead (line 21) | class SABLRetinaHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 54) | def __init__(self,
    method num_anchors (line 158) | def num_anchors(self):
    method _init_layers (line 163) | def _init_layers(self):
    method forward_single (line 194) | def forward_single(self, x):
    method forward (line 207) | def forward(self, feats):
    method get_anchors (line 210) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method get_target (line 231) | def get_target(self,
    method _get_target_single (line 331) | def _get_target_single(self,
    method loss_single (line 448) | def loss_single(self, cls_score, bbox_pred, labels, label_weights,
    method loss (line 481) | def loss(self,
    method get_bboxes (line 535) | def get_bboxes(self,
    method _get_bboxes_single (line 567) | def _get_bboxes_single(self,

FILE: mmdet/models/dense_heads/solo_head.py
  class SOLOHead (line 17) | class SOLOHead(BaseMaskHead):
    method __init__ (line 50) | def __init__(
    method _init_layers (line 103) | def _init_layers(self):
    method resize_feats (line 133) | def resize_feats(self, feats):
    method forward (line 155) | def forward(self, feats):
    method loss (line 201) | def loss(self,
    method _get_targets_single (line 291) | def _get_targets_single(self,
    method get_results (line 439) | def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas,
    method _get_results_single (line 488) | def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=No...
  class DecoupledSOLOHead (line 590) | class DecoupledSOLOHead(SOLOHead):
    method __init__ (line 599) | def __init__(self,
    method _init_layers (line 623) | def _init_layers(self):
    method forward (line 667) | def forward(self, feats):
    method loss (line 728) | def loss(self,
    method _get_targets_single (line 831) | def _get_targets_single(self,
    method get_results (line 874) | def get_results(self,
    method _get_results_single (line 939) | def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y,
  class DecoupledSOLOLightHead (line 1063) | class DecoupledSOLOLightHead(DecoupledSOLOHead):
    method __init__ (line 1073) | def __init__(self,
    method _init_layers (line 1100) | def _init_layers(self):
    method forward (line 1143) | def forward(self, feats):

FILE: mmdet/models/dense_heads/solov2_head.py
  class MaskFeatModule (line 19) | class MaskFeatModule(BaseModule):
    method __init__ (line 42) | def __init__(self,
    method _init_layers (line 66) | def _init_layers(self):
    method forward (line 134) | def forward(self, feats):
  class SOLOV2Head (line 154) | class SOLOV2Head(SOLOHead):
    method __init__ (line 170) | def __init__(self,
    method _init_layers (line 211) | def _init_layers(self):
    method forward (line 254) | def forward(self, feats):
    method _get_targets_single (line 294) | def _get_targets_single(self,
    method loss (line 462) | def loss(self,
    method get_results (line 584) | def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats,
    method _get_results_single (line 645) | def _get_results_single(self,

FILE: mmdet/models/dense_heads/ssd_head.py
  class SSDHead (line 19) | class SSDHead(AnchorHead):
    method __init__ (line 50) | def __init__(self,
    method num_anchors (line 116) | def num_anchors(self):
    method _init_layers (line 125) | def _init_layers(self):
    method forward (line 196) | def forward(self, feats):
    method loss_single (line 220) | def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weig...
    method loss (line 279) | def loss(self,

FILE: mmdet/models/dense_heads/tood_head.py
  class TaskDecomposition (line 17) | class TaskDecomposition(nn.Module):
    method __init__ (line 28) | def __init__(self,
    method init_weights (line 58) | def init_weights(self):
    method forward (line 64) | def forward(self, feat, avg_feat=None):
  class TOODHead (line 90) | class TOODHead(ATSSHead):
    method __init__ (line 113) | def __init__(self,
    method _init_layers (line 142) | def _init_layers(self):
    method init_weights (line 191) | def init_weights(self):
    method forward (line 210) | def forward(self, feats):
    method deform_sampling (line 284) | def deform_sampling(self, feat, offset):
    method anchor_center (line 297) | def anchor_center(self, anchors):
    method loss_single (line 310) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
    method loss (line 383) | def loss(self,
    method _get_bboxes_single (line 462) | def _get_bboxes_single(self,
    method get_targets (line 549) | def get_targets(self,
    method _get_target_single (line 664) | def _get_target_single(self,

FILE: mmdet/models/dense_heads/vfnet_head.py
  class VFNetHead (line 22) | class VFNetHead(ATSSHead, FCOSHead):
    method __init__ (line 68) | def __init__(self,
    method num_anchors (line 176) | def num_anchors(self):
    method anchor_generator (line 186) | def anchor_generator(self):
    method _init_layers (line 191) | def _init_layers(self):
    method forward (line 226) | def forward(self, feats):
    method forward_single (line 248) | def forward_single(self, x, scale, scale_refine, stride, reg_denom):
    method star_dcn_offset (line 309) | def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
    method loss (line 351) | def loss(self,
    method get_targets (line 500) | def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
    method _get_target_single (line 535) | def _get_target_single(self, *args, **kwargs):
    method get_fcos_targets (line 542) | def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
    method get_anchors (line 568) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
    method get_atss_targets (line 598) | def get_atss_targets(self,
    method transform_bbox_targets (line 677) | def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
    method _load_from_state_dict (line 703) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method _get_points_single (line 709) | def _get_points_single(self,

FILE: mmdet/models/dense_heads/yolact_head.py
  class YOLACTHead (line 16) | class YOLACTHead(AnchorHead):
    method __init__ (line 44) | def __init__(self,
    method _init_layers (line 89) | def _init_layers(self):
    method forward_single (line 117) | def forward_single(self, x):
    method loss (line 140) | def loss(self,
    method loss_single_OHEM (line 261) | def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
    method get_bboxes (line 297) | def get_bboxes(self,
    method _get_bboxes_single (line 358) | def _get_bboxes_single(self,
  class YOLACTSegmHead (line 457) | class YOLACTSegmHead(BaseModule):
    method __init__ (line 472) | def __init__(self,
    method _init_layers (line 490) | def _init_layers(self):
    method forward (line 495) | def forward(self, x):
    method loss (line 509) | def loss(self, segm_pred, gt_masks, gt_labels):
    method get_targets (line 542) | def get_targets(self, segm_pred, gt_masks, gt_labels):
    method simple_test (line 572) | def simple_test(self, feats, img_metas, rescale=False):
  class YOLACTProtonet (line 580) | class YOLACTProtonet(BaseModule):
    method __init__ (line 599) | def __init__(self,
    method _init_layers (line 625) | def _init_layers(self):
    method forward_dummy (line 662) | def forward_dummy(self, x):
    method forward (line 666) | def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=No...
    method loss (line 743) | def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_resu...
    method get_targets (line 816) | def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
    method get_seg_masks (line 841) | def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
    method crop (line 876) | def crop(self, masks, boxes, padding=1):
    method sanitize_coordinates (line 910) | def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
    method simple_test (line 941) | def simple_test(self,
  class InterpolateModule (line 1004) | class InterpolateModule(BaseModule):
    method __init__ (line 1010) | def __init__(self, *args, init_cfg=None, **kwargs):
    method forward (line 1016) | def forward(self, x):

FILE: mmdet/models/dense_heads/yolo_head.py
  class YOLOV3Head (line 23) | class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 51) | def __init__(self,
    method anchor_generator (line 125) | def anchor_generator(self):
    method num_anchors (line 132) | def num_anchors(self):
    method num_levels (line 142) | def num_levels(self):
    method num_attrib (line 146) | def num_attrib(self):
    method _init_layers (line 152) | def _init_layers(self):
    method init_weights (line 170) | def init_weights(self):
    method forward (line 186) | def forward(self, feats):
    method get_bboxes (line 209) | def get_bboxes(self,
    method loss (line 301) | def loss(self,
    method loss_single (line 352) | def loss_single(self, pred_map, target_map, neg_map):
    method get_targets (line 397) | def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
    method _get_targets_single (line 433) | def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
    method aug_test (line 494) | def aug_test(self, feats, img_metas, rescale=False):
    method onnx_export (line 513) | def onnx_export(self, pred_maps, img_metas, with_nms=True):

FILE: mmdet/models/dense_heads/yolof_head.py
  function levels_to_images (line 15) | def levels_to_images(mlvl_tensor):
  class YOLOFHead (line 43) | class YOLOFHead(AnchorHead):
    method __init__ (line 56) | def __init__(self,
    method _init_layers (line 68) | def _init_layers(self):
    method init_weights (line 108) | def init_weights(self):
    method forward_single (line 119) | def forward_single(self, feature):
    method loss (line 137) | def loss(self,
    method get_targets (line 224) | def get_targets(self,
    method _get_targets_single (line 311) | def _get_targets_single(self,

FILE: mmdet/models/dense_heads/yolox_head.py
  class YOLOXHead (line 22) | class YOLOXHead(BaseDenseHead, BBoxTestMixin):
    method __init__ (line 53) | def __init__(self,
    method _init_layers (line 131) | def _init_layers(self):
    method _build_stacked_convs (line 145) | def _build_stacked_convs(self):
    method _build_predictor (line 169) | def _build_predictor(self):
    method init_weights (line 176) | def init_weights(self):
    method forward_single (line 185) | def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg,
    method forward (line 198) | def forward(self, feats):
    method get_bboxes (line 217) | def get_bboxes(self,
    method _bbox_decode (line 301) | def _bbox_decode(self, priors, bbox_preds):
    method _bboxes_nms (line 313) | def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg):
    method loss (line 328) | def loss(self,
    method _get_target_single (line 426) | def _get_target_single(self, cls_preds, objectness, priors, decoded_bb...
    method _get_l1_target (line 488) | def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8):

FILE: mmdet/models/detectors/atss.py
  class ATSS (line 7) | class ATSS(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/autoassign.py
  class AutoAssign (line 7) | class AutoAssign(SingleStageDetector):
    method __init__ (line 11) | def __init__(self,

FILE: mmdet/models/detectors/base.py
  class BaseDetector (line 14) | class BaseDetector(BaseModule, metaclass=ABCMeta):
    method __init__ (line 17) | def __init__(self, init_cfg=None):
    method with_neck (line 22) | def with_neck(self):
    method with_shared_head (line 29) | def with_shared_head(self):
    method with_bbox (line 34) | def with_bbox(self):
    method with_mask (line 40) | def with_mask(self):
    method extract_feat (line 46) | def extract_feat(self, imgs):
    method extract_feats (line 50) | def extract_feats(self, imgs):
    method forward_train (line 63) | def forward_train(self, imgs, img_metas, **kwargs):
    method async_simple_test (line 82) | async def async_simple_test(self, img, img_metas, **kwargs):
    method simple_test (line 86) | def simple_test(self, img, img_metas, **kwargs):
    method aug_test (line 90) | def aug_test(self, imgs, img_metas, **kwargs):
    method aforward_test (line 94) | async def aforward_test(self, *, img, img_metas, **kwargs):
    method forward_test (line 112) | def forward_test(self, imgs, img_metas, **kwargs):
    method forward (line 157) | def forward(self, img, img_metas, return_loss=True, **kwargs):
    method _parse_losses (line 176) | def _parse_losses(self, losses):
    method train_step (line 221) | def train_step(self, data, optimizer):
    method val_step (line 256) | def val_step(self, data, optimizer=None):
    method show_result (line 276) | def show_result(self,
    method onnx_export (line 363) | def onnx_export(self, img, img_metas):

FILE: mmdet/models/detectors/cascade_rcnn.py
  class CascadeRCNN (line 7) | class CascadeRCNN(TwoStageDetector):
    method __init__ (line 11) | def __init__(self,
    method show_result (line 30) | def show_result(self, data, result, **kwargs):

FILE: mmdet/models/detectors/centernet.py
  class CenterNet (line 11) | class CenterNet(SingleStageDetector):
    method __init__ (line 17) | def __init__(self,
    method merge_aug_results (line 28) | def merge_aug_results(self, aug_results, with_nms):
    method aug_test (line 54) | def aug_test(self, imgs, img_metas, rescale=True):

FILE: mmdet/models/detectors/cornernet.py
  class CornerNet (line 10) | class CornerNet(SingleStageDetector):
    method __init__ (line 17) | def __init__(self,
    method merge_aug_results (line 28) | def merge_aug_results(self, aug_results, img_metas):
    method aug_test (line 62) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/ddod.py
  class DDOD (line 7) | class DDOD(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/deformable_detr.py
  class DeformableDETR (line 7) | class DeformableDETR(DETR):
    method __init__ (line 9) | def __init__(self, *args, **kwargs):

FILE: mmdet/models/detectors/detr.py
  class DETR (line 11) | class DETR(SingleStageDetector):
    method __init__ (line 15) | def __init__(self,
    method forward_dummy (line 27) | def forward_dummy(self, img):
    method onnx_export (line 50) | def onnx_export(self, img, img_metas):

FILE: mmdet/models/detectors/fast_rcnn.py
  class FastRCNN (line 7) | class FastRCNN(TwoStageDetector):
    method __init__ (line 10) | def __init__(self,
    method forward_test (line 27) | def forward_test(self, imgs, img_metas, proposals, **kwargs):

FILE: mmdet/models/detectors/faster_rcnn.py
  class FasterRCNN (line 7) | class FasterRCNN(TwoStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/fcos.py
  class FCOS (line 7) | class FCOS(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/fovea.py
  class FOVEA (line 7) | class FOVEA(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/fsaf.py
  class FSAF (line 7) | class FSAF(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/gfl.py
  class GFL (line 7) | class GFL(SingleStageDetector):
    method __init__ (line 9) | def __init__(self,

FILE: mmdet/models/detectors/grid_rcnn.py
  class GridRCNN (line 7) | class GridRCNN(TwoStageDetector):
    method __init__ (line 15) | def __init__(self,

FILE: mmdet/models/detectors/htc.py
  class HybridTaskCascade (line 7) | class HybridTaskCascade(CascadeRCNN):
    method __init__ (line 10) | def __init__(self, **kwargs):
    method with_semantic (line 14) | def with_semantic(self):

FILE: mmdet/models/detectors/kd_one_stage.py
  class KnowledgeDistillationSingleStageDetector (line 14) | class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
    method __init__ (line 25) | def __init__(self,
    method forward_train (line 46) | def forward_train(self,
    method cuda (line 78) | def cuda(self, device=None):
    method train (line 84) | def train(self, mode=True):
    method __setattr__ (line 92) | def __setattr__(self, name, value):

FILE: mmdet/models/detectors/lad.py
  class LAD (line 11) | class LAD(KnowledgeDistillationSingleStageDetector):
    method __init__ (line 14) | def __init__(self,
    method with_teacher_neck (line 42) | def with_teacher_neck(self):
    method extract_teacher_feat (line 47) | def extract_teacher_feat(self, img):
    method forward_train (line 54) | def forward_train(self,

FILE: mmdet/models/detectors/mask2former.py
  class Mask2Former (line 7) | class Mask2Former(MaskFormer):
    method __init__ (line 12) | def __init__(self,

FILE: mmdet/models/detectors/mask_rcnn.py
  class MaskRCNN (line 7) | class MaskRCNN(TwoStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/mask_scoring_rcnn.py
  class MaskScoringRCNN (line 7) | class MaskScoringRCNN(TwoStageDetector):
    method __init__ (line 13) | def __init__(self,

FILE: mmdet/models/detectors/maskformer.py
  class MaskFormer (line 14) | class MaskFormer(SingleStageDetector):
    method __init__ (line 19) | def __init__(self,
    method forward_dummy (line 52) | def forward_dummy(self, img, img_metas):
    method forward_train (line 70) | def forward_train(self,
    method simple_test (line 113) | def simple_test(self, imgs, img_metas, **kwargs):
    method aug_test (line 181) | def aug_test(self, imgs, img_metas, **kwargs):
    method onnx_export (line 184) | def onnx_export(self, img, img_metas):
    method _show_pan_result (line 187) | def _show_pan_result(self,

FILE: mmdet/models/detectors/nasfcos.py
  class NASFCOS (line 7) | class NASFCOS(SingleStageDetector):
    method __init__ (line 13) | def __init__(self,

FILE: mmdet/models/detectors/paa.py
  class PAA (line 7) | class PAA(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/panoptic_fpn.py
  class PanopticFPN (line 7) | class PanopticFPN(TwoStagePanopticSegmentor):
    method __init__ (line 11) | def __init__(

FILE: mmdet/models/detectors/panoptic_two_stage_segmentor.py
  class TwoStagePanopticSegmentor (line 14) | class TwoStagePanopticSegmentor(TwoStageDetector):
    method __init__ (line 21) | def __init__(
    method with_semantic_head (line 52) | def with_semantic_head(self):
    method with_panoptic_fusion_head (line 57) | def with_panoptic_fusion_head(self):
    method forward_dummy (line 61) | def forward_dummy(self, img):
    method forward_train (line 69) | def forward_train(self,
    method simple_test_mask (line 108) | def simple_test_mask(self,
    method simple_test (line 169) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
    method show_result (line 208) | def show_result(self,

FILE: mmdet/models/detectors/point_rend.py
  class PointRend (line 7) | class PointRend(TwoStageDetector):
    method __init__ (line 15) | def __init__(self,

FILE: mmdet/models/detectors/queryinst.py
  class QueryInst (line 7) | class QueryInst(SparseRCNN):
    method __init__ (line 11) | def __init__(self,

FILE: mmdet/models/detectors/reppoints_detector.py
  class RepPointsDetector (line 7) | class RepPointsDetector(SingleStageDetector):
    method __init__ (line 14) | def __init__(self,

FILE: mmdet/models/detectors/retinanet.py
  class RetinaNet (line 7) | class RetinaNet(SingleStageDetector):
    method __init__ (line 10) | def __init__(self,

FILE: mmdet/models/detectors/rpn.py
  class RPN (line 15) | class RPN(BaseDetector):
    method __init__ (line 18) | def __init__(self,
    method extract_feat (line 40) | def extract_feat(self, img):
    method forward_dummy (line 55) | def forward_dummy(self, img):
    method forward_train (line 61) | def forward_train(self,
    method simple_test (line 92) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 118) | def aug_test(self, imgs, img_metas, rescale=False):
    method show_result (line 143) | def show_result(self, data, result, top_k=20, **kwargs):

FILE: mmdet/models/detectors/scnet.py
  class SCNet (line 7) | class SCNet(CascadeRCNN):
    method __init__ (line 10) | def __init__(self, **kwargs):

FILE: mmdet/models/detectors/single_stage.py
  class SingleStageDetector (line 12) | class SingleStageDetector(BaseDetector):
    method __init__ (line 19) | def __init__(self,
    method extract_feat (line 41) | def extract_feat(self, img):
    method forward_dummy (line 48) | def forward_dummy(self, img):
    method forward_train (line 57) | def forward_train(self,
    method simple_test (line 87) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 110) | def aug_test(self, imgs, img_metas, rescale=False):
    method onnx_export (line 141) | def onnx_export(self, img, img_metas, with_nms=True):

FILE: mmdet/models/detectors/single_stage_instance_seg.py
  class SingleStageInstanceSegmentor (line 17) | class SingleStageInstanceSegmentor(BaseDetector):
    method __init__ (line 20) | def __init__(self,
    method extract_feat (line 56) | def extract_feat(self, img):
    method forward_dummy (line 63) | def forward_dummy(self, img):
    method forward_train (line 71) | def forward_train(self,
    method simple_test (line 142) | def simple_test(self, img, img_metas, rescale=False):
    method format_results (line 184) | def format_results(self, results):
    method aug_test (line 252) | def aug_test(self, imgs, img_metas, rescale=False):
    method show_result (line 255) | def show_result(self,

FILE: mmdet/models/detectors/solo.py
  class SOLO (line 7) | class SOLO(SingleStageInstanceSegmentor):
    method __init__ (line 13) | def __init__(self,

FILE: mmdet/models/detectors/solov2.py
  class SOLOv2 (line 7) | class SOLOv2(SingleStageInstanceSegmentor):
    method __init__ (line 13) | def __init__(self,

FILE: mmdet/models/detectors/sparse_rcnn.py
  class SparseRCNN (line 7) | class SparseRCNN(TwoStageDetector):
    method __init__ (line 11) | def __init__(self, *args, **kwargs):
    method forward_train (line 16) | def forward_train(self,
    method simple_test (line 67) | def simple_test(self, img, img_metas, rescale=False):
    method forward_dummy (line 93) | def forward_dummy(self, img):

FILE: mmdet/models/detectors/tood.py
  class TOOD (line 7) | class TOOD(SingleStageDetector):
    method __init__ (line 11) | def __init__(self,
    method set_epoch (line 22) | def set_epoch(self, epoch):

FILE: mmdet/models/detectors/trident_faster_rcnn.py
  class TridentFasterRCNN (line 7) | class TridentFasterRCNN(FasterRCNN):
    method __init__ (line 10) | def __init__(self,
    method simple_test (line 34) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
    method aug_test (line 49) | def aug_test(self, imgs, img_metas, rescale=False):
    method forward_train (line 62) | def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):

FILE: mmdet/models/detectors/two_stage.py
  class TwoStageDetector (line 11) | class TwoStageDetector(BaseDetector):
    method __init__ (line 18) | def __init__(self,
    method with_rpn (line 56) | def with_rpn(self):
    method with_roi_head (line 61) | def with_roi_head(self):
    method extract_feat (line 65) | def extract_feat(self, img):
    method forward_dummy (line 72) | def forward_dummy(self, img):
    method forward_train (line 90) | def forward_train(self,
    method async_simple_test (line 155) | async def async_simple_test(self,
    method simple_test (line 173) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
    method aug_test (line 186) | def aug_test(self, imgs, img_metas, rescale=False):
    method onnx_export (line 197) | def onnx_export(self, img, img_metas):

FILE: mmdet/models/detectors/vfnet.py
  class VFNet (line 7) | class VFNet(SingleStageDetector):
    method __init__ (line 11) | def __init__(self,

FILE: mmdet/models/detectors/yolact.py
  class YOLACT (line 10) | class YOLACT(SingleStageDetector):
    method __init__ (line 13) | def __init__(self,
    method forward_dummy (line 28) | def forward_dummy(self, img):
    method forward_train (line 38) | def forward_train(self,
    method simple_test (line 97) | def simple_test(self, img, img_metas, rescale=False):
    method aug_test (line 117) | def aug_test(self, imgs, img_metas, rescale=False):

FILE: mmdet/models/detectors/yolo.py
  class YOLOV3 (line 10) | class YOLOV3(SingleStageDetector):
    method __init__ (line 12) | def __init__(self,
    method onnx_export (line 23) | def onnx_export(self, img, img_metas):

FILE: mmdet/models/detectors/yolof.py
  class YOLOF (line 7) | class YOLOF(SingleStageDetector):
    method __init__ (line 11) | def __init__(self,

FILE: mmdet/models/detectors/yolox.py
  class YOLOX (line 15) | class YOLOX(SingleStageDetector):
    method __init__ (line 46) | def __init__(self,
    method forward_train (line 69) | def forward_train(self,
    method _preprocess (line 105) | def _preprocess(self, img, gt_bboxes):
    method _random_resize (line 119) | def _random_resize(self, device):

FILE: mmdet/models/losses/accuracy.py
  function accuracy (line 7) | def accuracy(pred, target, topk=1, thresh=None):
  class Accuracy (line 54) | class Accuracy(nn.Module):
    method __init__ (line 56) | def __init__(self, topk=(1, ), thresh=None):
    method forward (line 69) | def forward(self, pred, target):

FILE: mmdet/models/losses/ae_loss.py
  function ae_loss_per_image (line 11) | def ae_loss_per_image(tl_preds, br_preds, match):
  class AssociativeEmbeddingLoss (line 75) | class AssociativeEmbeddingLoss(nn.Module):
    method __init__ (line 88) | def __init__(self, pull_weight=0.25, push_weight=0.25):
    method forward (line 93) | def forward(self, pred, target, match):

FILE: mmdet/models/losses/balanced_l1_loss.py
  function balanced_l1_loss (line 13) | def balanced_l1_loss(pred,
  class BalancedL1Loss (line 57) | class BalancedL1Loss(nn.Module):
    method __init__ (line 74) | def __init__(self,
    method forward (line 87) | def forward(self,

FILE: mmdet/models/losses/cross_entropy_loss.py
  function cross_entropy (line 12) | def cross_entropy(pred,
  function _expand_onehot_labels (line 64) | def _expand_onehot_labels(labels, label_weights, label_channels, ignore_...
  function binary_cross_entropy (line 85) | def binary_cross_entropy(pred,
  function mask_cross_entropy (line 148) | def mask_cross_entropy(pred,
  class CrossEntropyLoss (line 201) | class CrossEntropyLoss(nn.Module):
    method __init__ (line 203) | def __init__(self,
    method extra_repr (line 252) | def extra_repr(self):
    method forward (line 257) | def forward(self,

FILE: mmdet/models/losses/dice_loss.py
  function dice_loss (line 9) | def dice_loss(pred,
  class DiceLoss (line 66) | class DiceLoss(nn.Module):
    method __init__ (line 68) | def __init__(self,
    method forward (line 103) | def forward(self,

FILE: mmdet/models/losses/focal_loss.py
  function py_sigmoid_focal_loss (line 12) | def py_sigmoid_focal_loss(pred,
  function py_focal_loss_with_prob (line 60) | def py_focal_loss_with_prob(pred,
  function sigmoid_focal_loss (line 113) | def sigmoid_focal_loss(pred,
  class FocalLoss (line 160) | class FocalLoss(nn.Module):
    method __init__ (line 162) | def __init__(self,
    method forward (line 196) | def forward(self,

FILE: mmdet/models/losses/gaussian_focal_loss.py
  function gaussian_focal_loss (line 11) | def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
  class GaussianFocalLoss (line 33) | class GaussianFocalLoss(nn.Module):
    method __init__ (line 50) | def __init__(self,
    method forward (line 61) | def forward(self,

FILE: mmdet/models/losses/gfocal_loss.py
  function quality_focal_loss (line 12) | def quality_focal_loss(pred, target, beta=2.0):
  function quality_focal_loss_with_prob (line 56) | def quality_focal_loss_with_prob(pred, target, beta=2.0):
  function distribution_focal_loss (line 103) | def distribution_focal_loss(pred, label):
  class QualityFocalLoss (line 128) | class QualityFocalLoss(nn.Module):
    method __init__ (line 146) | def __init__(self,
    method forward (line 160) | def forward(self,
  class DistributionFocalLoss (line 203) | class DistributionFocalLoss(nn.Module):
    method __init__ (line 213) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 218) | def forward(self,

FILE: mmdet/models/losses/ghm_loss.py
  function _expand_onehot_labels (line 10) | def _expand_onehot_labels(labels, label_weights, label_channels):
  class GHMC (line 23) | class GHMC(nn.Module):
    method __init__ (line 39) | def __init__(self,
    method forward (line 60) | def forward(self,
  class GHMR (line 122) | class GHMR(nn.Module):
    method __init__ (line 138) | def __init__(self,
    method forward (line 158) | def forward(self,

FILE: mmdet/models/losses/iou_loss.py
  function iou_loss (line 16) | def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
  function bounded_iou_loss (line 55) | def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
  function giou_loss (line 102) | def giou_loss(pred, target, eps=1e-7):
  function diou_loss (line 122) | def diou_loss(pred, target, eps=1e-7):
  function ciou_loss (line 177) | def ciou_loss(pred, target, eps=1e-7):
  class IoULoss (line 241) | class IoULoss(nn.Module):
    method __init__ (line 256) | def __init__(self,
    method forward (line 275) | def forward(self,
  class BoundedIoULoss (line 322) | class BoundedIoULoss(nn.Module):
    method __init__ (line 324) | def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1...
    method forward (line 331) | def forward(self,
  class GIoULoss (line 358) | class GIoULoss(nn.Module):
    method __init__ (line 360) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 366) | def forward(self,
  class DIoULoss (line 398) | class DIoULoss(nn.Module):
    method __init__ (line 400) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 406) | def forward(self,
  class CIoULoss (line 438) | class CIoULoss(nn.Module):
    method __init__ (line 440) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
    method forward (line 446) | def forward(self,

FILE: mmdet/models/losses/kd_loss.py
  function knowledge_distillation_kl_div_loss (line 12) | def knowledge_distillation_kl_div_loss(pred,
  class KnowledgeDistillationKLDivLoss (line 40) | class KnowledgeDistillationKLDivLoss(nn.Module):
    method __init__ (line 49) | def __init__(self, reduction='mean', loss_weight=1.0, T=10):
    method forward (line 56) | def forward(self,

FILE: mmdet/models/losses/mse_loss.py
  function mse_loss (line 10) | def mse_loss(pred, target):
  class MSELoss (line 16) | class MSELoss(nn.Module):
    method __init__ (line 25) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 30) | def forward(self,

FILE: mmdet/models/losses/pisa_loss.py
  function isr_p (line 9) | def isr_p(cls_score,
  function carl_loss (line 123) | def carl_loss(cls_score,

FILE: mmdet/models/losses/seesaw_loss.py
  function seesaw_ce_loss (line 12) | def seesaw_ce_loss(cls_score,
  class SeesawLoss (line 80) | class SeesawLoss(nn.Module):
    method __init__ (line 103) | def __init__(self,
    method _split_cls_score (line 138) | def _split_cls_score(self, cls_score):
    method get_cls_channels (line 145) | def get_cls_channels(self, num_classes):
    method get_activation (line 157) | def get_activation(self, cls_score):
    method get_accuracy (line 177) | def get_accuracy(self, cls_score, labels):
    method forward (line 199) | def forward(self,

FILE: mmdet/models/losses/smooth_l1_loss.py
  function smooth_l1_loss (line 12) | def smooth_l1_loss(pred, target, beta=1.0):
  function l1_loss (line 37) | def l1_loss(pred, target):
  class SmoothL1Loss (line 56) | class SmoothL1Loss(nn.Module):
    method __init__ (line 67) | def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
    method forward (line 73) | def forward(self,
  class L1Loss (line 108) | class L1Loss(nn.Module):
    method __init__ (line 117) | def __init__(self, reduction='mean', loss_weight=1.0):
    method forward (line 122) | def forward(self,

FILE: mmdet/models/losses/utils.py
  function reduce_loss (line 9) | def reduce_loss(loss, reduction):
  function weight_reduce_loss (line 30) | def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=N...
  function weighted_loss (line 62) | def weighted_loss(loss_func):

FILE: mmdet/models/losses/varifocal_loss.py
  function varifocal_loss (line 11) | def varifocal_loss(pred,
  class VarifocalLoss (line 60) | class VarifocalLoss(nn.Module):
    method __init__ (line 62) | def __init__(self,
    method forward (line 97) | def forward(self,

FILE: mmdet/models/necks/bfp.py
  class BFP (line 11) | class BFP(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method forward (line 70) | def forward(self, inputs):

FILE: mmdet/models/necks/channel_mapper.py
  class ChannelMapper (line 10) | class ChannelMapper(BaseModule):
    method __init__ (line 46) | def __init__(self,
    method forward (line 90) | def forward(self, inputs):

FILE: mmdet/models/necks/ct_resnet_neck.py
  class CTResNetNeck (line 12) | class CTResNetNeck(BaseModule):
    method __init__ (line 24) | def __init__(self,
    method _make_deconv_layer (line 38) | def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):
    method init_weights (line 64) | def init_weights(self):
    method forward (line 91) | def forward(self, inputs):

FILE: mmdet/models/necks/dilated_encoder.py
  class Bottleneck (line 10) | class Bottleneck(nn.Module):
    method __init__ (line 24) | def __init__(self,
    method forward (line 42) | def forward(self, x):
  class DilatedEncoder (line 52) | class DilatedEncoder(nn.Module):
    method __init__ (line 68) | def __init__(self, in_channels, out_channels, block_mid_channels,
    method _init_layers (line 78) | def _init_layers(self):
    method init_weights (line 95) | def init_weights(self):
    method forward (line 106) | def forward(self, feature):

FILE: mmdet/models/necks/dyhead.py
  class DyDCNv2 (line 17) | class DyDCNv2(nn.Module):
    method __init__ (line 32) | def __init__(self,
    method forward (line 45) | def forward(self, x, offset, mask):
  class DyHeadBlock (line 53) | class DyHeadBlock(nn.Module):
    method __init__ (line 69) | def __init__(self,
    method _init_weights (line 91) | def _init_weights(self):
    method forward (line 98) | def forward(self, x):
  class DyHead (line 132) | class DyHead(BaseModule):
    method __init__ (line 148) | def __init__(self,
    method forward (line 172) | def forward(self, inputs):

FILE: mmdet/models/necks/fpg.py
  class Transition (line 10) | class Transition(BaseModule):
    method __init__ (line 18) | def __init__(self, in_channels, out_channels, init_cfg=None):
    method forward (line 23) | def forward(x):
  class UpInterpolationConv (line 27) | class UpInterpolationConv(Transition):
    method __init__ (line 43) | def __init__(self,
    method forward (line 63) | def forward(self, x):
  class LastConv (line 73) | class LastConv(Transition):
    method __init__ (line 83) | def __init__(self,
    method forward (line 99) | def forward(self, inputs):
  class FPG (line 105) | class FPG(BaseModule):
    method __init__ (line 150) | def __init__(self,
    method build_trans (line 317) | def build_trans(self, cfg, in_channels, out_channels, **extra_args):
    method fuse (line 323) | def fuse(self, fuse_dict):
    method forward (line 333) | def forward(self, inputs):

FILE: mmdet/models/necks/fpn.py
  class FPN (line 11) | class FPN(BaseModule):
    method __init__ (line 62) | def __init__(self,
    method forward (line 152) | def forward(self, inputs):

FILE: mmdet/models/necks/fpn_carafe.py
  class FPN_CARAFE (line 11) | class FPN_CARAFE(BaseModule):
    method __init__ (line 37) | def __init__(self,
    method init_weights (line 209) | def init_weights(self):
    method slice_as (line 219) | def slice_as(self, src, dst):
    method tensor_add (line 239) | def tensor_add(self, a, b):
    method forward (line 247) | def forward(self, inputs):

FILE: mmdet/models/necks/hrfpn.py
  class HRFPN (line 13) | class HRFPN(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method forward (line 77) | def forward(self, inputs):

FILE: mmdet/models/necks/nas_fpn.py
  class NASFPN (line 11) | class NASFPN(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method forward (line 127) | def forward(self, inputs):

FILE: mmdet/models/necks/nasfcos_fpn.py
  class NASFCOS_FPN (line 12) | class NASFCOS_FPN(BaseModule):
    method __init__ (line 35) | def __init__(self,
    method forward (line 123) | def forward(self, inputs):
    method init_weights (line 157) | def init_weights(self):

FILE: mmdet/models/necks/pafpn.py
  class PAFPN (line 12) | class PAFPN(FPN):
    method __init__ (line 46) | def __init__(self,
    method forward (line 100) | def forward(self, inputs):

FILE: mmdet/models/necks/rfp.py
  class ASPP (line 12) | class ASPP(BaseModule):
    method __init__ (line 26) | def __init__(self,
    method forward (line 48) | def forward(self, x):
  class RFP (line 60) | class RFP(FPN):
    method __init__ (line 78) | def __init__(self,
    method init_weights (line 105) | def init_weights(self):
    method forward (line 117) | def forward(self, inputs):

FILE: mmdet/models/necks/ssd_neck.py
  class SSDNeck (line 11) | class SSDNeck(BaseModule):
    method __init__ (line 33) | def __init__(self,
    method forward (line 93) | def forward(self, inputs):
  class L2Norm (line 106) | class L2Norm(nn.Module):
    method __init__ (line 108) | def __init__(self, n_dims, scale=20., eps=1e-10):
    method forward (line 123) | def forward(self, x):

FILE: mmdet/models/necks/yolo_neck.py
  class DetectionBlock (line 12) | class DetectionBlock(BaseModule):
    method __init__ (line 35) | def __init__(self,
    method forward (line 55) | def forward(self, x):
  class YOLOV3Neck (line 65) | class YOLOV3Neck(BaseModule):
    method __init__ (line 92) | def __init__(self,
    method forward (line 120) | def forward(self, feats):

FILE: mmdet/models/necks/yolox_pafpn.py
  class YOLOXPAFPN (line 14) | class YOLOXPAFPN(BaseModule):
    method __init__ (line 35) | def __init__(self,
    method forward (line 117) | def forward(self, inputs):

FILE: mmdet/models/plugins/dropblock.py
  class DropBlock (line 11) | class DropBlock(nn.Module):
    method __init__ (line 25) | def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
    method forward (line 35) | def forward(self, x):
    method _compute_gamma (line 62) | def _compute_gamma(self, feat_size):
    method extra_repr (line 83) | def extra_repr(self):

FILE: mmdet/models/plugins/msdeformattn_pixel_decoder.py
  class MSDeformAttnPixelDecoder (line 16) | class MSDeformAttnPixelDecoder(BaseModule):
    method __init__ (line 40) | def __init__(self,
    method init_weights (line 135) | def init_weights(self):
    method forward (line 161) | def forward(self, feats):

FILE: mmdet/models/plugins/pixel_decoder.py
  class PixelDecoder (line 12) | class PixelDecoder(BaseModule):
    method __init__ (line 34) | def __init__(self,
    method init_weights (line 79) | def init_weights(self):
    method forward (line 88) | def forward(self, feats, img_metas):
  class TransformerEncoderPixelDecoder (line 116) | class TransformerEncoderPixelDecoder(PixelDecoder):
    method __init__ (line 138) | def __init__(self,
    method init_weights (line 178) | def init_weights(self):
    method forward (line 192) | def forward(self, feats, img_metas):

FILE: mmdet/models/roi_heads/base_roi_head.py
  class BaseRoIHead (line 9) | class BaseRoIHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 12) | def __init__(self,
    method with_bbox (line 38) | def with_bbox(self):
    method with_mask (line 43) | def with_mask(self):
    method with_shared_head (line 48) | def with_shared_head(self):
    method init_bbox_head (line 53) | def init_bbox_head(self):
    method init_mask_head (line 58) | def init_mask_head(self):
    method init_assigner_sampler (line 63) | def init_assigner_sampler(self):
    method forward_train (line 68) | def forward_train(self,
    method async_simple_test (line 79) | async def async_simple_test(self,
    method simple_test (line 89) | def simple_test(self,
    method aug_test (line 98) | def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):

FILE: mmdet/models/roi_heads/bbox_heads/bbox_head.py
  class BBoxHead (line 15) | class BBoxHead(BaseModule):
    method __init__ (line 19) | def __init__(self,
    method custom_cls_channels (line 97) | def custom_cls_channels(self):
    method custom_activation (line 101) | def custom_activation(self):
    method custom_accuracy (line 105) | def custom_accuracy(self):
    method forward (line 109) | def forward(self, x):
    method _get_target_single (line 122) | def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
    method get_targets (line 189) | def get_targets(self,
    method loss (line 257) | def loss(self,
    method get_bboxes (line 316) | def get_bboxes(self,
    method refine_bboxes (line 381) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
    method regress_by_class (line 460) | def regress_by_class(self, rois, label, bbox_pred, img_meta):
    method onnx_export (line 499) | def onnx_export(self,

FILE: mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
  class ConvFCBBoxHead (line 11) | class ConvFCBBoxHead(BBoxHead):
    method __init__ (line 22) | def __init__(self,
    method _add_conv_fc_branch (line 118) | def _add_conv_fc_branch(self,
    method forward (line 159) | def forward(self, x):
  class Shared2FCBBoxHead (line 201) | class Shared2FCBBoxHead(ConvFCBBoxHead):
    method __init__ (line 203) | def __init__(self, fc_out_channels=1024, *args, **kwargs):
  class Shared4Conv1FCBBoxHead (line 217) | class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
    method __init__ (line 219) | def __init__(self, fc_out_channels=1024, *args, **kwargs):

FILE: mmdet/models/roi_heads/bbox_heads/dii_head.py
  class DIIHead (line 18) | class DIIHead(BBoxHead):
    method __init__ (line 46) | def __init__(self,
    method init_weights (line 125) | def init_weights(self):
    method forward (line 141) | def forward(self, roi_feat, proposal_feat):
    method loss (line 201) | def loss(self,
    method _get_target_single (line 285) | def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
    method get_targets (line 359) | def get_targets(self,

FILE: mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
  class BasicResBlock (line 11) | class BasicResBlock(BaseModule):
    method __init__ (line 26) | def __init__(self,
    method forward (line 63) | def forward(self, x):
  class DoubleConvFCBBoxHead (line 77) | class DoubleConvFCBBoxHead(BBoxHead):
    method __init__ (line 91) | def __init__(self,
    method _add_conv_branch (line 136) | def _add_conv_branch(self):
    method _add_fc_branch (line 148) | def _add_fc_branch(self):
    method forward (line 158) | def forward(self, x_cls, x_reg):

FILE: mmdet/models/roi_heads/bbox_heads/sabl_head.py
  class SABLHead (line 15) | class SABLHead(BaseModule):
    method __init__ (line 58) | def __init__(self,
    method custom_cls_channels (line 210) | def custom_cls_channels(self):
    method custom_activation (line 214) | def custom_activation(self):
    method custom_accuracy (line 218) | def custom_accuracy(self):
    method _add_fc_branch (line 221) | def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
    method cls_forward (line 230) | def cls_forward(self, cls_x):
    method attention_pool (line 237) | def attention_pool(self, reg_x):
    method side_aware_feature_extractor (line 250) | def side_aware_feature_extractor(self, reg_x):
    method reg_pred (line 271) | def reg_pred(self, x, offset_fcs, cls_fcs):
    method side_aware_split (line 289) | def side_aware_split(self, feat):
    method bbox_pred_split (line 301) | def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
    method reg_forward (line 310) | def reg_forward(self, reg_x):
    method forward (line 329) | def forward(self, x):
    method get_targets (line 336) | def get_targets(self, sampling_results, gt_bboxes, gt_labels,
    method bucket_target (line 351) | def bucket_target(self,
    method _bucket_target_single (line 377) | def _bucket_target_single(self, pos_proposals, neg_proposals,
    method loss (line 440) | def loss(self,
    method get_bboxes (line 485) | def get_bboxes(self,
    method refine_bboxes (line 527) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
    method regress_by_class (line 573) | def regress_by_class(self, rois, label, bbox_pred, img_meta):

FILE: mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
  class SCNetBBoxHead (line 7) | class SCNetBBoxHead(ConvFCBBoxHead):
    method _forward_shared (line 14) | def _forward_shared(self, x):
    method _forward_cls_reg (line 31) | def _forward_cls_reg(self, x):
    method forward (line 59) | def forward(self, x, return_shared_feat=False):

FILE: mmdet/models/roi_heads/cascade_roi_head.py
  class CascadeRoIHead (line 16) | class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
    method __init__ (line 22) | def __init__(self,
    method init_bbox_head (line 52) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
    method init_mask_head (line 72) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method init_assigner_sampler (line 100) | def init_assigner_sampler(self):
    method forward_dummy (line 112) | def forward_dummy(self, x, proposals):
    method _bbox_forward (line 130) | def _bbox_forward(self, stage, x, rois):
    method _bbox_forward_train (line 143) | def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
    method _mask_forward (line 158) | def _mask_forward(self, stage, x, rois):
    method _mask_forward_train (line 170) | def _mask_forward_train(self,
    method forward_train (line 191) | def forward_train(self,
    method simple_test (line 288) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 458) | def aug_test(self, features, proposal_list, img_metas, rescale=False):
    method onnx_export (line 561) | def onnx_export(self, x, proposals, img_metas):

FILE: mmdet/models/roi_heads/double_roi_head.py
  class DoubleHeadRoIHead (line 7) | class DoubleHeadRoIHead(StandardRoIHead):
    method __init__ (line 13) | def __init__(self, reg_roi_scale_factor, **kwargs):
    method _bbox_forward (line 17) | def _bbox_forward(self, x, rois):

FILE: mmdet/models/roi_heads/dynamic_roi_head.py
  class DynamicRoIHead (line 14) | class DynamicRoIHead(StandardRoIHead):
    method __init__ (line 17) | def __init__(self, **kwargs):
    method forward_train (line 25) | def forward_train(self,
    method _bbox_forward_train (line 109) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method update_hyperparameters (line 134) | def update_hyperparameters(self):

FILE: mmdet/models/roi_heads/grid_roi_head.py
  class GridRoIHead (line 11) | class GridRoIHead(StandardRoIHead):
    method __init__ (line 17) | def __init__(self, grid_roi_extractor, grid_head, **kwargs):
    method _random_jitter (line 28) | def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
    method forward_dummy (line 53) | def forward_dummy(self, x, proposals):
    method _bbox_forward_train (line 79) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method simple_test (line 117) | def simple_test(self,

FILE: mmdet/models/roi_heads/htc_roi_head.py
  class HybridTaskCascadeRoIHead (line 14) | class HybridTaskCascadeRoIHead(CascadeRoIHead):
    method __init__ (line 20) | def __init__(self,
    method with_semantic (line 44) | def with_semantic(self):
    method forward_dummy (line 51) | def forward_dummy(self, x, proposals):
    method _bbox_forward_train (line 86) | def _bbox_forward_train(self,
    method _mask_forward_train (line 113) | def _mask_forward_train(self,
    method _bbox_forward (line 158) | def _bbox_forward(self, stage, x, rois, semantic_feat=None):
    method _mask_forward_test (line 176) | def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
    method forward_train (line 205) | def forward_train(self,
    method simple_test (line 330) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 505) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
  class CoarseMaskHead (line 10) | class CoarseMaskHead(FCNMaskHead):
    method __init__ (line 26) | def __init__(self,
    method init_weights (line 84) | def init_weights(self):
    method forward (line 88) | def forward(self, x):

FILE: mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
  class DynamicMaskHead (line 14) | class DynamicMaskHead(FCNMaskHead):
    method __init__ (line 42) | def __init__(self,
    method init_weights (line 83) | def init_weights(self):
    method forward (line 92) | def forward(self, roi_feat, proposal_feat):
    method loss (line 125) | def loss(self, mask_pred, mask_targets, labels):
    method get_targets (line 139) | def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):

FILE: mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
  class FCNMaskHead (line 23) | class FCNMaskHead(BaseModule):
    method __init__ (line 25) | def __init__(self,
    method init_weights (line 115) | def init_weights(self):
    method forward (line 128) | def forward(self, x):
    method get_targets (line 138) | def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
    method loss (line 148) | def loss(self, mask_pred, mask_targets, labels):
    method get_seg_masks (line 179) | def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
    method onnx_export (line 312) | def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
  function _do_paste_mask (line 344) | def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):

FILE: mmdet/models/roi_heads/mask_heads/feature_relay_head.py
  class FeatureRelayHead (line 9) | class FeatureRelayHead(BaseModule):
    method __init__ (line 22) | def __init__(self,
    method forward (line 43) | def forward(self, x):

FILE: mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
  class FusedSemanticHead (line 13) | class FusedSemanticHead(BaseModule):
    method __init__ (line 29) | def __init__(self,
    method forward (line 97) | def forward(self, feats):
    method loss (line 115) | def loss(self, mask_pred, labels):

FILE: mmdet/models/roi_heads/mask_heads/global_context_head.py
  class GlobalContextHead (line 11) | class GlobalContextHead(BaseModule):
    method __init__ (line 29) | def __init__(self,
    method forward (line 80) | def forward(self, feats):
    method loss (line 94) | def loss(self, pred, labels):

FILE: mmdet/models/roi_heads/mask_heads/grid_head.py
  class GridHead (line 13) | class GridHead(BaseModule):
    method __init__ (line 15) | def __init__(self,
    method forward (line 155) | def forward(self, x):
    method calc_sub_regions (line 193) | def calc_sub_regions(self):
    method get_targets (line 224) | def get_targets(self, sampling_results, rcnn_train_cfg):
    method loss (line 292) | def loss(self, grid_pred, grid_targets):
    method get_bboxes (line 298) | def get_bboxes(self, det_bboxes, grid_pred, img_metas):

FILE: mmdet/models/roi_heads/mask_heads/htc_mask_head.py
  class HTCMaskHead (line 9) | class HTCMaskHead(FCNMaskHead):
    method __init__ (line 11) | def __init__(self, with_conv_res=True, *args, **kwargs):
    method forward (line 22) | def forward(self, x, res_feat=None, return_logits=True, return_feat=Tr...

FILE: mmdet/models/roi_heads/mask_heads/mask_point_head.py
  class MaskPointHead (line 16) | class MaskPointHead(BaseModule):
    method __init__ (line 42) | def __init__(self,
    method forward (line 88) | def forward(self, fine_grained_feats, coarse_feats):
    method get_targets (line 109) | def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
    method _get_target_single (line 149) | def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
    method loss (line 167) | def loss(self, point_pred, point_targets, labels):
    method get_roi_rel_points_train (line 190) | def get_roi_rel_points_train(self, mask_pred, labels, cfg):
    method get_roi_rel_points_test (line 215) | def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):

FILE: mmdet/models/roi_heads/mask_heads/maskiou_head.py
  class MaskIoUHead (line 13) | class MaskIoUHead(BaseModule):
    method __init__ (line 19) | def __init__(self,
    method forward (line 73) | def forward(self, mask_feat, mask_pred):
    method loss (line 88) | def loss(self, mask_iou_pred, mask_iou_targets):
    method get_targets (line 98) | def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targ...
    method _get_area_ratio (line 147) | def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
    method get_mask_scores (line 174) | def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):

FILE: mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
  class SCNetMaskHead (line 8) | class SCNetMaskHead(FCNMaskHead):
    method __init__ (line 16) | def __init__(self, conv_to_res=True, **kwargs):

FILE: mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
  class SCNetSemanticHead (line 8) | class SCNetSemanticHead(FusedSemanticHead):
    method __init__ (line 16) | def __init__(self, conv_to_res=True, **kwargs):

FILE: mmdet/models/roi_heads/mask_scoring_roi_head.py
  class MaskScoringRoIHead (line 10) | class MaskScoringRoIHead(StandardRoIHead):
    method __init__ (line 16) | def __init__(self, mask_iou_head, **kwargs):
    method _mask_forward_train (line 21) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method simple_test_mask (line 49) | def simple_test_mask(self,

FILE: mmdet/models/roi_heads/pisa_roi_head.py
  class PISARoIHead (line 9) | class PISARoIHead(StandardRoIHead):
    method forward_train (line 13) | def forward_train(self,
    method _bbox_forward (line 87) | def _bbox_forward(self, x, rois):
    method _bbox_forward_train (line 100) | def _bbox_forward_train(self,

FILE: mmdet/models/roi_heads/point_rend_roi_head.py
  class PointRendRoIHead (line 18) | class PointRendRoIHead(StandardRoIHead):
    method __init__ (line 21) | def __init__(self, point_head, *args, **kwargs):
    method init_point_head (line 26) | def init_point_head(self, point_head):
    method _mask_forward_train (line 30) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method _mask_point_forward_train (line 45) | def _mask_point_forward_train(self, x, sampling_results, mask_pred,
    method _get_fine_grained_point_feats (line 66) | def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
    method _mask_point_forward_test (line 104) | def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
    method simple_test_mask (line 155) | def simple_test_mask(self,
    method aug_test_mask (line 217) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
    method _onnx_get_fine_grained_point_feats (line 250) | def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points):
    method _mask_point_onnx_export (line 284) | def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred):
    method mask_onnx_export (line 351) | def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwa...

FILE: mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
  class BaseRoIExtractor (line 10) | class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
    method __init__ (line 21) | def __init__(self,
    method num_inputs (line 33) | def num_inputs(self):
    method build_roi_layers (line 37) | def build_roi_layers(self, layer_cfg, featmap_strides):
    method roi_rescale (line 62) | def roi_rescale(self, rois, scale_factor):
    method forward (line 87) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
  class GenericRoIExtractor (line 10) | class GenericRoIExtractor(BaseRoIExtractor):
    method __init__ (line 25) | def __init__(self,
    method forward (line 44) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
  class SingleRoIExtractor (line 10) | class SingleRoIExtractor(BaseRoIExtractor):
    method __init__ (line 26) | def __init__(self,
    method map_roi_levels (line 36) | def map_roi_levels(self, rois, num_levels):
    method forward (line 58) | def forward(self, feats, rois, roi_scale_factor=None):

FILE: mmdet/models/roi_heads/scnet_roi_head.py
  class SCNetRoIHead (line 14) | class SCNetRoIHead(CascadeRoIHead):
    method __init__ (line 26) | def __init__(self,
    method init_mask_head (line 50) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method with_semantic (line 57) | def with_semantic(self):
    method with_feat_relay (line 63) | def with_feat_relay(self):
    method with_glbctx (line 69) | def with_glbctx(self):
    method _fuse_glbctx (line 73) | def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
    method _slice_pos_feats (line 83) | def _slice_pos_feats(self, feats, sampling_results):
    method _bbox_forward (line 96) | def _bbox_forward(self,
    method _mask_forward (line 125) | def _mask_forward(self,
    method _bbox_forward_train (line 150) | def _bbox_forward_train(self,
    method _mask_forward_train (line 179) | def _mask_forward_train(self,
    method forward_train (line 206) | def forward_train(self,
    method simple_test (line 314) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
    method aug_test (line 487) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):

FILE: mmdet/models/roi_heads/shared_heads/res_layer.py
  class ResLayer (line 13) | class ResLayer(BaseModule):
    method __init__ (line 15) | def __init__(self,
    method forward (line 70) | def forward(self, x):
    method train (line 75) | def train(self, mode=True):

FILE: mmdet/models/roi_heads/sparse_roi_head.py
  class SparseRoIHead (line 12) | class SparseRoIHead(CascadeRoIHead):
    method __init__ (line 37) | def __init__(self,
    method _bbox_forward (line 88) | def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
    method _mask_forward (line 151) | def _mask_forward(self, stage, x, rois, attn_feats):
    method _mask_forward_train (line 163) | def _mask_forward_train(self, stage, x, attn_feats, sampling_results,
    method forward_train (line 184) | def forward_train(self,
    method simple_test (line 276) | def simple_test(self,
    method aug_test (line 400) | def aug_test(self, features, proposal_list, img_metas, rescale=False):
    method forward_dummy (line 404) | def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):

FILE: mmdet/models/roi_heads/standard_roi_head.py
  class StandardRoIHead (line 11) | class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
    method init_assigner_sampler (line 14) | def init_assigner_sampler(self):
    method init_bbox_head (line 23) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
    method init_mask_head (line 28) | def init_mask_head(self, mask_roi_extractor, mask_head):
    method forward_dummy (line 38) | def forward_dummy(self, x, proposals):
    method forward_train (line 54) | def forward_train(self,
    method _bbox_forward (line 118) | def _bbox_forward(self, x, rois):
    method _bbox_forward_train (line 131) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
    method _mask_forward_train (line 146) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
    method _mask_forward (line 181) | def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
    method async_simple_test (line 198) | async def async_simple_test(self,
    method simple_test (line 223) | def simple_test(self,
    method aug_test (line 269) | def aug_test(self, x, proposal_list, img_metas, rescale=False):
    method onnx_export (line 295) | def onnx_export(self, x, proposals, img_metas, rescale=False):
    method mask_onnx_export (line 308) | def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwa...
    method bbox_onnx_export (line 350) | def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,

FILE: mmdet/models/roi_heads/test_mixins.py
  class BBoxTestMixin (line 15) | class BBoxTestMixin:
    method async_test_bboxes (line 19) | async def async_test_bboxes(self,
    method simple_test_bboxes (line 51) | def simple_test_bboxes(self,
    method aug_test_bboxes (line 138) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...
  class MaskTestMixin (line 179) | class MaskTestMixin:
    method async_test_mask (line 183) | async def async_test_mask(self,
    method simple_test_mask (line 224) | def simple_test_mask(self,
    method aug_test_mask (line 281) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):

FILE: mmdet/models/roi_heads/trident_roi_head.py
  class TridentRoIHead (line 12) | class TridentRoIHead(StandardRoIHead):
    method __init__ (line 22) | def __init__(self, num_branch, test_branch_idx, **kwargs):
    method merge_trident_bboxes (line 27) | def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
    method simple_test (line 46) | def simple_test(self,
    method aug_test_bboxes (line 83) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...

FILE: mmdet/models/seg_heads/base_semantic_head.py
  class BaseSemanticHead (line 11) | class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 20) | def __init__(self,
    method loss (line 32) | def loss(self, seg_preds, gt_semantic_seg):
    method forward (line 55) | def forward(self, x):
    method forward_train (line 65) | def forward_train(self, x, gt_semantic_seg):
    method simple_test (line 70) | def simple_test(self, x, img_metas, rescale=False):

FILE: mmdet/models/seg_heads/panoptic_fpn_head.py
  class PanopticFPNHead (line 14) | class PanopticFPNHead(BaseSemanticHead):
    method __init__ (line 52) | def __init__(self,
    method _set_things_to_void (line 108) | def _set_things_to_void(self, gt_semantic_seg):
    method loss (line 129) | def loss(self, seg_preds, gt_semantic_seg):
    method init_weights (line 137) | def init_weights(self):
    method forward (line 142) | def forward(self, x):

FILE: mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py
  class BasePanopticFusionHead (line 9) | class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
    method __init__ (line 12) | def __init__(self,
    method with_loss (line 31) | def with_loss(self):
    method forward_train (line 36) | def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
    method simple_test (line 40) | def simple_test(self,

FILE: mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py
  class HeuristicFusionHead (line 10) | class HeuristicFusionHead(BasePanopticFusionHead):
    method __init__ (line 13) | def __init__(self,
    method forward_train (line 23) | def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
    method _lay_masks (line 27) | def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
    method simple_test (line 82) | def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,

FILE: mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py
  class MaskFormerFusionHead (line 12) | class MaskFormerFusionHead(BasePanopticFusionHead):
    method __init__ (line 14) | def __init__(self,
    method forward_train (line 24) | def forward_train(self, **kwargs):
    method panoptic_postprocess (line 28) | def panoptic_postprocess(self, mask_cls, mask_pred):
    method semantic_postprocess (line 94) | def semantic_postprocess(self, mask_cls, mask_pred):
    method instance_postprocess (line 112) | def instance_postprocess(self, mask_cls, mask_pred):
    method simple_test (line 164) | def simple_test(self,

FILE: mmdet/models/utils/brick_wrappers.py
  function adaptive_avg_pool2d (line 15) | def adaptive_avg_pool2d(input, output_size):
  class AdaptiveAvgPool2d (line 32) | class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
    method forward (line 35) | def forward(self, x):

FILE: mmdet/models/utils/builder.py
  function build_transformer (line 9) | def build_transformer(cfg, default_args=None):
  function build_linear_layer (line 17) | def build_linear_layer(cfg, *args, **kwargs):

FILE: mmdet/models/utils/ckpt_convert.py
  function pvt_convert (line 12) | def pvt_convert(ckpt):
  function swin_converter (line 85) | def swin_converter(ckpt):

FILE: mmdet/models/utils/conv_upsample.py
  class ConvUpsample (line 7) | class ConvUpsample(BaseModule):
    method __init__ (line 28) | def __init__(self,
    method forward (line 59) | def forward(self, x):

FILE: mmdet/models/utils/csp_layer.py
  class DarknetBottleneck (line 8) | class DarknetBottleneck(BaseModule):
    method __init__ (line 32) | def __init__(self,
    method forward (line 64) | def forward(self, x):
  class CSPLayer (line 75) | class CSPLayer(BaseModule):
    method __init__ (line 96) | def __init__(self,
    method forward (line 143) | def forward(self, x):

FILE: mmdet/models/utils/gaussian_target.py
  function gaussian2D (line 8) | def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
  function gen_gaussian_target (line 32) | def gen_gaussian_target(heatmap, center, radius, k=1):
  function gaussian_radius (line 68) | def gaussian_radius(det_size, min_overlap):
  function get_local_maximum (line 190) | def get_local_maximum(heat, kernel=3):
  function get_topk_from_heatmap (line 207) | def get_topk_from_heatmap(scores, k=20):
  function gather_feat (line 234) | def gather_feat(feat, ind, mask=None):
  function transpose_and_gather_feat (line 255) | def transpose_and_gather_feat(feat, ind):

FILE: mmdet/models/utils/inverted_residual.py
  class InvertedResidual (line 11) | class InvertedResidual(BaseModule):
    method __init__ (line 42) | def __init__(self,
    method forward (line 105) | def forward(self, x):

FILE: mmdet/models/utils/make_divisible.py
  function make_divisible (line 2) | def make_divisible(value, divisor, min_value=None, min_ratio=0.9):

FILE: mmdet/models/utils/misc.py
  class SigmoidGeometricMean (line 6) | class SigmoidGeometricMean(Function):
    method forward (line 17) | def forward(ctx, x, y):
    method backward (line 25) | def backward(ctx, grad_output):
  function interpolate_as (line 35) | def interpolate_as(source, target, mode='bilinear', align_corners=False):

FILE: mmdet/models/utils/normed_predictor.py
  class NormedLinear (line 11) | class NormedLinear(nn.Linear):
    method __init__ (line 21) | def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
    method init_weights (line 28) | def init_weights(self):
    method forward (line 33) | def forward(self, x):
  class NormedConv2d (line 43) | class NormedConv2d(nn.Conv2d):
    method __init__ (line 55) | def __init__(self,
    method forward (line 68) | def forward(self, x):

FILE: mmdet/models/utils/panoptic_gt_processing.py
  function preprocess_panoptic_gt (line 5) | def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_thi...

FILE: mmdet/models/utils/point_sample.py
  function get_uncertainty (line 6) | def get_uncertainty(mask_pred, labels):
  function get_uncertain_point_coords_with_randomness (line 32) | def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_po...

FILE: mmdet/models/utils/positional_encoding.py
  class SinePositionalEncoding (line 11) | class SinePositionalEncoding(BaseModule):
    method __init__ (line 36) | def __init__(self,
    method forward (line 56) | def forward(self, mask):
    method __repr__ (line 95) | def __repr__(self):
  class LearnedPositionalEncoding (line 107) | class LearnedPositionalEncoding(BaseModule):
    method __init__ (line 121) | def __init__(self,
    method forward (line 133) | def forward(self, mask):
    method __repr__ (line 157) | def __repr__(self):

FILE: mmdet/models/utils/res_layer.py
  class ResLayer (line 7) | class ResLayer(Sequential):
    method __init__ (line 26) | def __init__(self,
  class SimplifiedBasicBlock (line 107) | class SimplifiedBasicBlock(BaseModule):
    method __init__ (line 116) | def __init__(self,
    method norm1 (line 162) | def norm1(self):
    method norm2 (line 167) | def norm2(self):
    method forward (line 171) | def forward(self, x):

FILE: mmdet/models/utils/se_layer.py
  class SELayer (line 9) | class SELayer(BaseModule):
    method __init__ (line 28) | def __init__(self,
    method forward (line 55) | def forward(self, x):
  class DyReLU (line 62) | class DyReLU(BaseModule):
    method __init__ (line 88) | def __init__(self,
    method forward (line 118) | def forward(self, x):

FILE: mmdet/models/utils/transformer.py
  function nlc_to_nchw (line 32) | def nlc_to_nchw(x, hw_shape):
  function nchw_to_nlc (line 49) | def nchw_to_nlc(x):
  class AdaptivePadding (line 62) | class AdaptivePadding(nn.Module):
    method __init__ (line 93) | def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corne...
    method get_pad_shape (line 109) | def get_pad_shape(self, input_shape):
    method forward (line 121) | def forward(self, x):
  class PatchEmbed (line 134) | class PatchEmbed(BaseModule):
    method __init__ (line 162) | def __init__(
    method forward (line 236) | def forward(self, x):
  class PatchMerging (line 260) | class PatchMerging(BaseModule):
    method __init__ (line 291) | def __init__(self,
    method forward (line 340) | def forward(self, x, input_size):
  function inverse_sigmoid (line 388) | def inverse_sigmoid(x, eps=1e-5):
  class DetrTransformerDecoderLayer (line 408) | class DetrTransformerDecoderLayer(BaseTransformerLayer):
    method __init__ (line 430) | def __init__(self,
  class DetrTransformerEncoder (line 454) | class DetrTransformerEncoder(TransformerLayerSequence):
    method __init__ (line 462) | def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs):
    method forward (line 473) | def forward(self, *args, **kwargs):
  class DetrTransformerDecoder (line 486) | class DetrTransformerDecoder(TransformerLayerSequence):
    method __init__ (line 495) | def __init__(self,
    method forward (line 509) | def forward(self, query, *args, **kwargs):
  class Transformer (line 539) | class Transformer(BaseModule):
    method __init__ (line 561) | def __init__(self, encoder=None, decoder=None, init_cfg=None):
    method init_weights (line 567) | def init_weights(self):
    method forward (line 574) | def forward(self, x, mask, query_embed, pos_embed):
  class DeformableDetrTransformerDecoder (line 625) | class DeformableDetrTransformerDecoder(TransformerLayerSequence):
    method __init__ (line 634) | def __init__(self, *args, return_intermediate=False, **kwargs):
    method forward (line 639) | def forward(self,
  class DeformableDetrTransformer (line 713) | class DeformableDetrTransformer(Transformer):
    method __init__ (line 725) | def __init__(self,
    method init_layers (line 737) | def init_layers(self):
    method init_weights (line 751) | def init_weights(self):
    method gen_encoder_output_proposals (line 763) | def gen_encoder_output_proposals(self, memory, memory_padding_mask,
    method get_reference_points (line 831) | def get_reference_points(spatial_shapes, valid_ratios, device):
    method get_valid_ratio (line 865) | def get_valid_ratio(self, mask):
    method get_proposal_pos_embed (line 875) | def get_proposal_pos_embed(self,
    method forward (line 893) | def forward(self,
  class DynamicConv (line 1063) | class DynamicConv(BaseModule):
    method __init__ (line 1090) | def __init__(self,
    method forward (line 1124) | def forward(self, param_feature, input_feature):

FILE: mmdet/utils/ascend_util.py
  function masked_fill (line 5) | def masked_fill(ori_tensor, mask, new_value, neg=False):
  function batch_images_to_levels (line 26) | def batch_images_to_levels(target, num_levels):
  function get_max_num_gt_division_factor (line 49) | def get_max_num_gt_division_factor(gt_nums,

FILE: mmdet/utils/collect_env.py
  function collect_env (line 8) | def collect_env():

FILE: mmdet/utils/compat_config.py
  function compat_cfg (line 8) | def compat_cfg(cfg):
  function compat_runner_args (line 22) | def compat_runner_args(cfg):
  function compat_imgs_per_gpu (line 37) | def compat_imgs_per_gpu(cfg):
  function compat_loader_args (line 54) | def compat_loader_args(cfg):

FILE: mmdet/utils/contextmanagers.py
  function completed (line 17) | async def completed(trace_name='',
  function concurrent (line 92) | async def concurrent(streamqueue: asyncio.Queue,

FILE: mmdet/utils/logger.py
  function get_root_logger (line 8) | def get_root_logger(log_file=None, log_level=logging.INFO):
  function get_caller_name (line 24) | def get_caller_name():
  function log_img_scale (line 37) | def log_img_scale(img_scale, shape_order='hw', skip_square=False):

FILE: mmdet/utils/memory.py
  function cast_tensor_type (line 12) | def cast_tensor_type(inputs, src_type=None, dst_type=None):
  function _ignore_torch_cuda_oom (line 63) | def _ignore_torch_cuda_oom():
  class AvoidOOM (line 79) | class AvoidOOM:
    method __init__ (line 120) | def __init__(self, to_cpu=True, test=False):
    method retry_if_cuda_oom (line 124) | def retry_if_cuda_oom(self, func):

FILE: mmdet/utils/misc.py
  function find_latest_checkpoint (line 12) | def find_latest_checkpoint(path, suffix='pth'):
  function update_data_root (line 46) | def update_data_root(cfg, logger=None):
  function floordiv (line 85) | def floordiv(dividend, divisor, rounding_mode='trunc'):

FILE: mmdet/utils/profiling.py
  function profile_time (line 11) | def profile_time(trace_name,

FILE: mmdet/utils/replace_cfg_vals.py
  function replace_cfg_vals (line 7) | def replace_cfg_vals(ori_cfg):

FILE: mmdet/utils/rfnext.py
  function rfnext_init_model (line 8) | def rfnext_init_model(detector, cfg):

FILE: mmdet/utils/setup_env.py
  function setup_multi_processes (line 10) | def setup_multi_processes(cfg):

FILE: mmdet/utils/split_batch.py
  function split_batch (line 5) | def split_batch(img, img_metas, kwargs):

FILE: mmdet/utils/util_distribution.py
  function build_dp (line 10) | def build_dp(model, device='cuda', dim=0, *args, **kwargs):
  function build_ddp (line 40) | def build_ddp(model, device='cuda', *args, **kwargs):
  function is_npu_available (line 74) | def is_npu_available():
  function is_mlu_available (line 79) | def is_mlu_available():
  function get_device (line 84) | def get_device():

FILE: mmdet/utils/util_mixins.py
  class NiceRepr (line 42) | class NiceRepr:
    method __nice__ (line 76) | def __nice__(self):
    method __repr__ (line 87) | def __repr__(self):
    method __str__ (line 97) | def __str__(self):

FILE: mmdet/utils/util_random.py
  function ensure_rng (line 6) | def ensure_rng(rng=None):

FILE: mmdet/version.py
  function parse_version_info (line 7) | def parse_version_info(version_str):

FILE: projects/instance_segment_anything/models/det_wrapper_instance_sam.py
  class DetWrapperInstanceSAM (line 15) | class DetWrapperInstanceSAM(BaseDetector):
    method __init__ (line 19) | def __init__(self,
    method init_weights (line 55) | def init_weights(self):
    method simple_test (line 58) | def simple_test(self, img, img_metas, rescale=True, ori_img=None):
    method aug_test (line 134) | def aug_test(self, imgs, img_metas, **kwargs):
    method onnx_export (line 137) | def onnx_export(self, img, img_metas):
    method async_simple_test (line 140) | async def async_simple_test(self, img, img_metas, **kwargs):
    method forward_train (line 143) | def forward_train(self, imgs, img_metas, **kwargs):
    method extract_feat (line 146) | def extract_feat(self, imgs):

FILE: projects/instance_segment_anything/models/det_wrapper_instance_sam_cascade.py
  class DetWrapperInstanceSAMCascade (line 10) | class DetWrapperInstanceSAMCascade(DetWrapperInstanceSAM):
    method __init__ (line 11) | def __init__(self,
    method simple_test (line 41) | def simple_test(self, img, img_metas, rescale=True, ori_img=None):

FILE: projects/instance_segment_anything/models/focalnet_dino/focalnet_dino_wrapper.py
  class FocalNetDINOWrapper (line 9) | class FocalNetDINOWrapper(BaseModule):
    method __init__ (line 10) | def __init__(self,
    method forward (line 23) | def forward(self,
    method simple_test (line 93) | def simple_test(self, img, img_metas, rescale=False):

FILE: projects/instance_segment_anything/models/focalnet_dino/models/__init__.py
  function build_model (line 9) | def build_model(args):

FILE: projects/instance_segment_anything/models/focalnet_dino/models/dino/attention.py
  class MultiheadAttention (line 59) | class MultiheadAttention(Module):
    method __init__ (line 85) | def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bi...
    method _reset_parameters (line 111) | def _reset_parameters(self):
    method __setstate__ (line 114) | def __setstate__(self, state):
    method forward (line 121) | def forward(self, query, key, value, key_padding_mask=None,
  function multi_head_attention_forward (line 183) | def multi_head_attention_forward(query: Tensor,

FILE: projects/instance_segment_anything/models/focalnet_dino/models/dino/backbone.py
  class FrozenBatchNorm2d (line 35) | class FrozenBatchNorm2d(torch.nn.Module):
    method __init__ (line 44) | def __init__(self, n):
    method _load_from_state_dict (line 51) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
    method forward (line 61) | def forward(self, x):
  class BackboneBase (line 74) | class BackboneBase(nn.Module):
    method __init__ (line 76) | def __init__(self, backbone: nn.Module, train_backbone: bool, num_chan...
    method forward (line 96) | def forward(self, tensor_list: NestedTensor):
  class Backbone (line 108) | class Backbone(BackboneBase):
    method __init__ (line 110) | def __init__(self, name: str,
  class Joiner (line 130) | class Joiner(nn.Sequential):
    method __init__ (line 131) | def __init__(self, backbone, position_embedding):
    method forward (line 134) | def forward(self, tensor_list: NestedTensor):
  function build_backbone (line 146) | def build_backbone(args):

FILE: projects/instance_segment_anything/models/focalnet_dino/models/dino/convnext.py
  class Block (line 18) | class Block(nn.Module):
    method __init__ (line 29) | def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):
    method forward (line 40) | def forward(self, x):
  class ConvNeXt (line 55) | class ConvNeXt(nn.Module):
    method __init__ (line 69) | def __init__(self, in_chans=3, num_classes=1000,
    method _init_weights (line 116) | def _init_weights(self, m):
    method forward_features (line 121) | def forward_features(self, x):
    method forward (line 138) | def forward(self, tensor_list: NestedTensor):
  class LayerNorm (line 152) | class LayerNorm(nn.Module):
    method __init__ (line 158) | def __init__(self, normalized_shape, eps=1e-6, data_format="channels_l...
    method forward (line 168) | def forward(self, x):
  function build_convnext (line 234) | def build_convnext(modelname, pretrained,backbone_dir=None, **kw):

FILE: projects/instance_segment_anything/models/focalnet_dino/models/dino/deformable_transformer.py
  class DeformableTransformer (line 26) | class DeformableTransformer(nn.Module):
    method __init__ (line 28) | def __init__(self, d_model=256, nhead=8,
    method _reset_parameters (line 228) | def _reset_parameters(self):
    method get_valid_ratio (line 242) | def get_valid_ratio(self, mask):
    method init_ref_points (line 251) | def init_ref_points(self, use_num_queries):
    method forward (line 262) | def forward(self, srcs, masks, refpoint_embed, pos_embeds, tgt, attn_m...
  class TransformerEncoder (line 446) | class TransformerEncoder(nn.Module):
    method __init__ (line 448) | def __init__(self,
    method get_reference_points (line 489) | def get_reference_points(spatial_shapes, valid_ratios, device):
    method forward (line 503) | def forward(self,
  class TransformerDecoder (line 596) | class TransformerDecoder(nn.Module):
    method __init__ (line 598) | def __init__(self, decoder_layer, num_layers, norm=None,
    method forward (line 667) | def forward(self, tgt, memory,
  class DeformableTransformerEncoderLayer (line 794) | class DeformableTransformerEncoderLayer(nn.Module):
    method __init__ (line 795) | def __init__(self,
    method with_pos_embed (line 828) | def with_pos_embed(tensor, pos):
    method forward_ffn (line 831) | def forward_ffn(self, src):
    method forward (line 837) | def forward(self, src, pos, reference_points, spatial_shapes, level_st...
  class DeformableTransformerDecoderLayer (line 852) | class DeformableTransformerDecoderLayer(nn.Module):
    method __init__ (line 853) | def __init__(self, d_model=256, d_ffn=1024,
    method rm_self_attn_modules (line 899) | def rm_self_attn_modules(self):
    method with_pos_embed (line 906) | def with_pos_embed(tensor, pos):
    method forward_ffn (line 909) | def forward_ffn(self, tgt):
    method forward_sa (line 915) | def forward_sa(self,
    method forward_ca (line 958) | def forward_ca(self,
    method forward (line 994) | def forward(self,
  function _get_clones (line 1033) | def _get_clones(module, N, layer_share=False):
  function build_deformable_transformer (line 1040) | def build_deformable_transformer(args):

FILE: projects/instance_segment_anything/models/focalnet_dino/models/dino/dino.py
  class DINO (line 38) | class DINO(nn.Module):
    method __init__ (line 41) | def __init__(self, backbone, transformer, num_classes, num_queries,
    method _reset_parameters (line 190) | def _reset_parameters(self):
    method init_ref_points (line 196) | def init_ref_points(self, use_num_queries):
    method forward (line 224) | def forward(self, samples: NestedTensor, targets: List = None):
    method _set_aux_loss (line 335) | def _set_aux_loss(self, outputs_class, outputs_coord):
  class SetCriterion (line 343) | class SetCriterion(nn.Module):
    method __init__ (line 350) | def __init__(self, num_classes, matcher, weight_dict, focal_alpha, los...
    method loss_labels (line 366) | def loss_labels(self, outputs, targets, indices, num_boxes, log=True):
    method loss_cardinality (line 394) | def loss_cardinality(self, outputs, targets, indices, num_boxes):
    method loss_boxes (line 407) | def loss_boxes(self, outputs, targets, indices, num_boxes):
    method loss_masks (line 434) | def loss_masks(self, outputs, targets, indices, num_boxes):
    method _get_src_permutation_idx (line 463) | def _get_src_permutation_idx(self, indices):
    method _get_tgt_permutation_idx (line 469) | def _get_tgt_permutation_idx(self, indices):
    method get_loss (line 
Condensed preview — 483 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (4,163K chars).
[
  {
    "path": ".gitignore",
    "chars": 1384,
    "preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
  },
  {
    "path": "LICENSE",
    "chars": 11398,
    "preview": "Copyright 2018-2023 OpenMMLab. All rights reserved.\n\n                                 Apache License\n                   "
  },
  {
    "path": "README.md",
    "chars": 11758,
    "preview": "# Prompt-Segment-Anything\nThis is an implementation of zero-shot instance segmentation using [Segment Anything](https://"
  },
  {
    "path": "app.py",
    "chars": 12068,
    "preview": "import os\n\nSPACE_ID = os.getenv('SPACE_ID')\nif SPACE_ID is not None:\n    # running on huggingface space\n    os.system(r'"
  },
  {
    "path": "mmdet/__init__.py",
    "chars": 909,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\n\nfrom .version import __version__, short_version\n\n\ndef digit"
  },
  {
    "path": "mmdet/apis/__init__.py",
    "chars": 563,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .inference import (async_inference_detector, inference_detector,\n  "
  },
  {
    "path": "mmdet/apis/inference.py",
    "chars": 8629,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom pathlib import Path\n\nimport mmcv\nimport numpy as np"
  },
  {
    "path": "mmdet/apis/test.py",
    "chars": 7817,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport pickle\nimport shutil\nimport tempfile\nimport"
  },
  {
    "path": "mmdet/apis/train.py",
    "chars": 8379,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\nimport random\n\nimport numpy as np\nimport torch\nimport torch.di"
  },
  {
    "path": "mmdet/core/__init__.py",
    "chars": 445,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor import *  # noqa: F401, F403\nfrom .bbox import *  # noqa: F"
  },
  {
    "path": "mmdet/core/anchor/__init__.py",
    "chars": 720,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator,\n "
  },
  {
    "path": "mmdet/core/anchor/anchor_generator.py",
    "chars": 37205,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\nfrom torch."
  },
  {
    "path": "mmdet/core/anchor/builder.py",
    "chars": 583,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nfrom mmcv.utils import Registry, build_from_cfg\n\nPRIOR_"
  },
  {
    "path": "mmdet/core/anchor/point_generator.py",
    "chars": 10739,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair"
  },
  {
    "path": "mmdet/core/anchor/utils.py",
    "chars": 2545,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef images_to_levels(target, num_levels):\n    \"\"\"Convert "
  },
  {
    "path": "mmdet/core/bbox/__init__.py",
    "chars": 1688,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner"
  },
  {
    "path": "mmdet/core/bbox/assigners/__init__.py",
    "chars": 1171,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .approx_max_iou_assigner import ApproxMaxIoUAssigner\nfrom .ascend_a"
  },
  {
    "path": "mmdet/core/bbox/assigners/approx_max_iou_assigner.py",
    "chars": 6697,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/assigners/ascend_assign_result.py",
    "chars": 1403,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmdet.utils import util_mixins\n\n\nclass AscendAssignResult(util_mixi"
  },
  {
    "path": "mmdet/core/bbox/assigners/ascend_max_iou_assigner.py",
    "chars": 8841,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ....utils import masked_fill\nfrom ..builder import BB"
  },
  {
    "path": "mmdet/core/bbox/assigners/assign_result.py",
    "chars": 7761,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.utils import util_mixins\n\n\nclass AssignResult(u"
  },
  {
    "path": "mmdet/core/bbox/assigners/atss_assigner.py",
    "chars": 10174,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom"
  },
  {
    "path": "mmdet/core/bbox/assigners/base_assigner.py",
    "chars": 375,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseAssigner(metaclass=A"
  },
  {
    "path": "mmdet/core/bbox/assigners/center_region_assigner.py",
    "chars": 15477,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/assigners/grid_assigner.py",
    "chars": 6863,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/assigners/hungarian_assigner.py",
    "chars": 6439,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom scipy.optimize import linear_sum_assignment\n\nfrom ..bu"
  },
  {
    "path": "mmdet/core/bbox/assigners/mask_hungarian_assigner.py",
    "chars": 5312,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom scipy.optimize import linear_sum_assignment\n\nfrom mmde"
  },
  {
    "path": "mmdet/core/bbox/assigners/max_iou_assigner.py",
    "chars": 10168,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/assigners/point_assigner.py",
    "chars": 5995,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom .assign_result i"
  },
  {
    "path": "mmdet/core/bbox/assigners/region_assigner.py",
    "chars": 9473,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import anchor_inside_flags\nfrom ..builder "
  },
  {
    "path": "mmdet/core/bbox/assigners/sim_ota_assigner.py",
    "chars": 11495,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\n\nfrom ..bu"
  },
  {
    "path": "mmdet/core/bbox/assigners/task_aligned_assigner.py",
    "chars": 6554,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/assigners/uniform_assigner.py",
    "chars": 5556,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_ASSIGNERS\nfrom ..iou_calculator"
  },
  {
    "path": "mmdet/core/bbox/builder.py",
    "chars": 628,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nBBOX_ASSIGNERS = Regist"
  },
  {
    "path": "mmdet/core/bbox/coder/__init__.py",
    "chars": 654,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_bbox_coder import BaseBBoxCoder\nfrom .bucketing_bbox_coder im"
  },
  {
    "path": "mmdet/core/bbox/coder/base_bbox_coder.py",
    "chars": 496,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\n\nclass BaseBBoxCoder(metaclass="
  },
  {
    "path": "mmdet/core/bbox/coder/bucketing_bbox_coder.py",
    "chars": 14119,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn.functional a"
  },
  {
    "path": "mmdet/core/bbox/coder/delta_xywh_bbox_coder.py",
    "chars": 15978,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom ..bui"
  },
  {
    "path": "mmdet/core/bbox/coder/distance_point_bbox_coder.py",
    "chars": 2481,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_CODERS\nfrom ..transforms import bbox2distance"
  },
  {
    "path": "mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py",
    "chars": 8257,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_"
  },
  {
    "path": "mmdet/core/bbox/coder/pseudo_bbox_coder.py",
    "chars": 577,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_CODERS\nfrom .base_bbox_coder import BaseBBoxC"
  },
  {
    "path": "mmdet/core/bbox/coder/tblr_bbox_coder.py",
    "chars": 8625,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_b"
  },
  {
    "path": "mmdet/core/bbox/coder/yolo_bbox_coder.py",
    "chars": 3252,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport torch\n\nfrom ..builder import BBOX_CODERS\nfrom .base_b"
  },
  {
    "path": "mmdet/core/bbox/demodata.py",
    "chars": 1181,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom mmdet.utils.util_random import ens"
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/__init__.py",
    "chars": 221,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import build_iou_calculator\nfrom .iou2d_calculator import "
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/builder.py",
    "chars": 293,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nIOU_CALCULATORS = Regis"
  },
  {
    "path": "mmdet/core/bbox/iou_calculators/iou2d_calculator.py",
    "chars": 9602,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom .builder import IOU_CALCULATORS\n\n\ndef cast_tensor_typ"
  },
  {
    "path": "mmdet/core/bbox/match_costs/__init__.py",
    "chars": 366,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import build_match_cost\nfrom .match_cost import (BBoxL1Cos"
  },
  {
    "path": "mmdet/core/bbox/match_costs/builder.py",
    "chars": 275,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.utils import Registry, build_from_cfg\n\nMATCH_COST = Registry('"
  },
  {
    "path": "mmdet/core/bbox/match_costs/match_cost.py",
    "chars": 12651,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core.bbox.iou_c"
  },
  {
    "path": "mmdet/core/bbox/samplers/__init__.py",
    "chars": 827,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .base_sampler import BaseSampler\nfrom .combined_sampler import Comb"
  },
  {
    "path": "mmdet/core/bbox/samplers/base_sampler.py",
    "chars": 3920,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\n\nfrom .sampling_re"
  },
  {
    "path": "mmdet/core/bbox/samplers/combined_sampler.py",
    "chars": 748,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import BBOX_SAMPLERS, build_sampler\nfrom .base_sampler im"
  },
  {
    "path": "mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py",
    "chars": 2319,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfro"
  },
  {
    "path": "mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py",
    "chars": 6740,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfro"
  },
  {
    "path": "mmdet/core/bbox/samplers/mask_pseudo_sampler.py",
    "chars": 1583,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_p"
  },
  {
    "path": "mmdet/core/bbox/samplers/mask_sampling_result.py",
    "chars": 2031,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"copy from\nhttps://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_p"
  },
  {
    "path": "mmdet/core/bbox/samplers/ohem_sampler.py",
    "chars": 4221,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom ..transforms impo"
  },
  {
    "path": "mmdet/core/bbox/samplers/pseudo_sampler.py",
    "chars": 1470,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .base_sampler imp"
  },
  {
    "path": "mmdet/core/bbox/samplers/random_sampler.py",
    "chars": 3071,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ..builder import BBOX_SAMPLERS\nfrom .base_sampler imp"
  },
  {
    "path": "mmdet/core/bbox/samplers/sampling_result.py",
    "chars": 5389,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.utils import util_mixins\n\n\nclass SamplingResult"
  },
  {
    "path": "mmdet/core/bbox/samplers/score_hlr_sampler.py",
    "chars": 11235,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops import nms_match\n\nfrom ..builder import BBOX_"
  },
  {
    "path": "mmdet/core/bbox/transforms.py",
    "chars": 8962,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\n\n\ndef find_inside_bboxes(bboxes, img_h, "
  },
  {
    "path": "mmdet/core/data_structures/__init__.py",
    "chars": 169,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .general_data import GeneralData\nfrom .instance_data import Instanc"
  },
  {
    "path": "mmdet/core/data_structures/general_data.py",
    "chars": 11414,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport numpy as np\nimport torch\n\nfrom mmdet.utils.util_mixi"
  },
  {
    "path": "mmdet/core/data_structures/instance_data.py",
    "chars": 6926,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\n\nimport numpy as np\nimport torch\n\nfrom .general_data im"
  },
  {
    "path": "mmdet/core/evaluation/__init__.py",
    "chars": 1100,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .class_names import (cityscapes_classes, coco_classes, dataset_alia"
  },
  {
    "path": "mmdet/core/evaluation/bbox_overlaps.py",
    "chars": 2454,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\n\ndef bbox_overlaps(bboxes1,\n                  bboxes"
  },
  {
    "path": "mmdet/core/evaluation/class_names.py",
    "chars": 30536,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\n\n\ndef wider_face_classes():\n    return ['face']\n\n\ndef voc_cl"
  },
  {
    "path": "mmdet/core/evaluation/eval_hooks.py",
    "chars": 5604,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport bisect\nimport os.path as osp\n\nimport mmcv\nimport torch.distribute"
  },
  {
    "path": "mmdet/core/evaluation/mean_ap.py",
    "chars": 32678,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom multiprocessing import Pool\n\nimport mmcv\nimport numpy as np\nfrom mm"
  },
  {
    "path": "mmdet/core/evaluation/panoptic_utils.py",
    "chars": 273,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# A custom value to distinguish instance ID and category ID; need to\n# b"
  },
  {
    "path": "mmdet/core/evaluation/recall.py",
    "chars": 6806,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Sequence\n\nimport numpy as np\nfrom mmcv.utils"
  },
  {
    "path": "mmdet/core/export/__init__.py",
    "chars": 505,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx,"
  },
  {
    "path": "mmdet/core/export/model_wrappers.py",
    "chars": 7469,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport warnings\n\nimport numpy as np\nimport torch\n\n"
  },
  {
    "path": "mmdet/core/export/onnx_helper.py",
    "chars": 8367,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os\n\nimport torch\n\n\ndef dynamic_clip_for_onnx(x1, y1, x2, y2, max_"
  },
  {
    "path": "mmdet/core/export/pytorch2onnx.py",
    "chars": 5995,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\n\nimport mmcv\nimport numpy as np\nimport tor"
  },
  {
    "path": "mmdet/core/hook/__init__.py",
    "chars": 752,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .checkloss_hook import CheckInvalidLossHook\nfrom .ema import ExpMom"
  },
  {
    "path": "mmdet/core/hook/checkloss_hook.py",
    "chars": 681,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\n@HOOKS.register"
  },
  {
    "path": "mmdet/core/hook/ema.py",
    "chars": 5150,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runne"
  },
  {
    "path": "mmdet/core/hook/memory_profiler_hook.py",
    "chars": 2118,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.runner.hooks import HOOKS, Hook\n\n\n@HOOKS.register_module()\ncla"
  },
  {
    "path": "mmdet/core/hook/set_epoch_info_hook.py",
    "chars": 442,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner import HOOK"
  },
  {
    "path": "mmdet/core/hook/sync_norm_hook.py",
    "chars": 1789,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections import OrderedDict\n\nfrom mmcv.runner import get_dist_in"
  },
  {
    "path": "mmdet/core/hook/sync_random_size_hook.py",
    "chars": 3061,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport random\nimport warnings\n\nimport torch\nfrom mmcv.runner import get_"
  },
  {
    "path": "mmdet/core/hook/wandblogger_hook.py",
    "chars": 23938,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport importlib\nimport os.path as osp\nimport sys\nimport warnings\n\nimpor"
  },
  {
    "path": "mmdet/core/hook/yolox_lrupdater_hook.py",
    "chars": 2310,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.runner.hooks import HOOKS\nfrom mmcv.runner.hooks.lr_updater im"
  },
  {
    "path": "mmdet/core/hook/yolox_mode_switch_hook.py",
    "chars": 2270,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom mmcv.parallel import is_module_wrapper\nfrom mmcv.runner.hooks impor"
  },
  {
    "path": "mmdet/core/mask/__init__.py",
    "chars": 375,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .mask_target import mask_target\nfrom .structures import BaseInstanc"
  },
  {
    "path": "mmdet/core/mask/mask_target.py",
    "chars": 5115,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom torch.nn.modules.utils import _pair"
  },
  {
    "path": "mmdet/core/mask/structures.py",
    "chars": 41496,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport cv2\nimport mmcv\nimport n"
  },
  {
    "path": "mmdet/core/mask/utils.py",
    "chars": 3017,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport pycocotools.mask as mask_util\nimpo"
  },
  {
    "path": "mmdet/core/optimizers/__init__.py",
    "chars": 300,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import OPTIMIZER_BUILDERS, build_optimizer\nfrom .layer_dec"
  },
  {
    "path": "mmdet/core/optimizers/builder.py",
    "chars": 1218,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nfrom mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMC"
  },
  {
    "path": "mmdet/core/optimizers/layer_decay_optimizer_constructor.py",
    "chars": 5856,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport json\n\nfrom mmcv.runner import DefaultOptimizerConstructor, get_di"
  },
  {
    "path": "mmdet/core/post_processing/__init__.py",
    "chars": 412,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .bbox_nms import fast_nms, multiclass_nms\nfrom .matrix_nms import m"
  },
  {
    "path": "mmdet/core/post_processing/bbox_nms.py",
    "chars": 6495,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.ops.nms import batched_nms\n\nfrom mmdet.core.bbox."
  },
  {
    "path": "mmdet/core/post_processing/matrix_nms.py",
    "chars": 4622,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\n\ndef mask_matrix_nms(masks,\n                    labels,\n  "
  },
  {
    "path": "mmdet/core/post_processing/merge_augs.py",
    "chars": 5790,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport numpy as np\nimport torch\nfrom mmcv i"
  },
  {
    "path": "mmdet/core/utils/__init__.py",
    "chars": 635,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_g"
  },
  {
    "path": "mmdet/core/utils/dist_utils.py",
    "chars": 6525,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport functools\nimport pickle\nimport warnings\nfrom collections import O"
  },
  {
    "path": "mmdet/core/utils/misc.py",
    "chars": 7147,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom functools import partial\n\nimport numpy as np\nimport torch\nfrom six."
  },
  {
    "path": "mmdet/core/visualization/__init__.py",
    "chars": 318,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .image import (color_val_matplotlib, imshow_det_bboxes,\n           "
  },
  {
    "path": "mmdet/core/visualization/image.py",
    "chars": 21190,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\n\nimport cv2\nimport matplotlib.pyplot as plt\nimport mmcv\nimpor"
  },
  {
    "path": "mmdet/core/visualization/palette.py",
    "chars": 2068,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\n\n\ndef palette_val(palette):\n    \"\"\"Conver"
  },
  {
    "path": "mmdet/datasets/__init__.py",
    "chars": 1648,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import DATASETS, PIPELINES, build_dataloader, build_datase"
  },
  {
    "path": "mmdet/datasets/api_wrappers/__init__.py",
    "chars": 253,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .coco_api import COCO, COCOeval\nfrom .panoptic_evaluation import pq"
  },
  {
    "path": "mmdet/datasets/api_wrappers/coco_api.py",
    "chars": 1506,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# This file add snake case alias for coco api\n\nimport warnings\n\nimport p"
  },
  {
    "path": "mmdet/datasets/api_wrappers/panoptic_evaluation.py",
    "chars": 9113,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n\n# Copyright (c) 2018, Alexander Kirillov\n# This file supports `file_cli"
  },
  {
    "path": "mmdet/datasets/builder.py",
    "chars": 8421,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport platform\nimport random\nimport warnings\nfrom functools"
  },
  {
    "path": "mmdet/datasets/cityscapes.py",
    "chars": 14529,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# Modified from https://github.com/facebookresearch/detectron2/blob/mast"
  },
  {
    "path": "mmdet/datasets/coco.py",
    "chars": 28333,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport contextlib\nimport io\nimport itertools\nimport logging\nimport os.pa"
  },
  {
    "path": "mmdet/datasets/coco_occluded.py",
    "chars": 9166,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nimport mmcv\nimport numpy as np\nfrom mmcv.fileio i"
  },
  {
    "path": "mmdet/datasets/coco_panoptic.py",
    "chars": 29277,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport os\nfrom collections import defaultdict\n\nimport m"
  },
  {
    "path": "mmdet/datasets/custom.py",
    "chars": 15497,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport warnings\nfrom collections import OrderedDic"
  },
  {
    "path": "mmdet/datasets/dataset_wrappers.py",
    "chars": 17774,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport bisect\nimport collections\nimport copy\nimport math\nfrom collection"
  },
  {
    "path": "mmdet/datasets/deepfashion.py",
    "chars": 642,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .builder import DATASETS\nfrom .coco import CocoDataset\n\n\n@DATASETS."
  },
  {
    "path": "mmdet/datasets/lvis.py",
    "chars": 46336,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\nimport logging\nimport os.path as osp\nimport tempfile\nim"
  },
  {
    "path": "mmdet/datasets/objects365.py",
    "chars": 13187,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nfrom .api_wrappers import COCO\nfrom .builder impo"
  },
  {
    "path": "mmdet/datasets/openimages.py",
    "chars": 35084,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport csv\nimport json\nimport os.path as osp\nimport warnings"
  },
  {
    "path": "mmdet/datasets/pipelines/__init__.py",
    "chars": 1807,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .auto_augment import (AutoAugment, BrightnessTransform, ColorTransf"
  },
  {
    "path": "mmdet/datasets/pipelines/auto_augment.py",
    "chars": 36538,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport cv2\nimport mmcv\nimport numpy as np\n\nfrom ..builder i"
  },
  {
    "path": "mmdet/datasets/pipelines/compose.py",
    "chars": 1626,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport collections\n\nfrom mmcv.utils import build_from_cfg\n\nfrom ..builde"
  },
  {
    "path": "mmdet/datasets/pipelines/formating.py",
    "chars": 293,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# flake8: noqa\nimport warnings\n\nfrom .formatting import *\n\nwarnings.warn"
  },
  {
    "path": "mmdet/datasets/pipelines/formatting.py",
    "chars": 13853,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections.abc import Sequence\n\nimport mmcv\nimport numpy as np\nimp"
  },
  {
    "path": "mmdet/datasets/pipelines/instaboost.py",
    "chars": 4508,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\n\nfrom ..builder import PIPELINES\n\n\n@PIPELINES.registe"
  },
  {
    "path": "mmdet/datasets/pipelines/loading.py",
    "chars": 22759,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\n\nimport mmcv\nimport numpy as np\nimport pycocotools"
  },
  {
    "path": "mmdet/datasets/pipelines/test_time_aug.py",
    "chars": 4466,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\n\nfrom ..builder import PIPELINES\nfrom .comp"
  },
  {
    "path": "mmdet/datasets/pipelines/transforms.py",
    "chars": 118461,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport inspect\nimport math\nimport warnings\n\nimport cv2\nimpor"
  },
  {
    "path": "mmdet/datasets/samplers/__init__.py",
    "chars": 455,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .class_aware_sampler import ClassAwareSampler\nfrom .distributed_sam"
  },
  {
    "path": "mmdet/datasets/samplers/class_aware_sampler.py",
    "chars": 6543,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nfrom mmcv.runner import get_dist_info\nfrom tor"
  },
  {
    "path": "mmdet/datasets/samplers/distributed_sampler.py",
    "chars": 2037,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nfrom torch.utils.data import DistributedSample"
  },
  {
    "path": "mmdet/datasets/samplers/group_sampler.py",
    "chars": 5384,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport numpy as np\nimport torch\nfrom mmcv.runner import get"
  },
  {
    "path": "mmdet/datasets/samplers/infinite_sampler.py",
    "chars": 7064,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport itertools\n\nimport numpy as np\nimport torch\nfrom mmcv.runner impor"
  },
  {
    "path": "mmdet/datasets/utils.py",
    "chars": 6670,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nfrom mmcv.cnn import VGG\nfrom mmcv.runner.h"
  },
  {
    "path": "mmdet/datasets/voc.py",
    "chars": 4724,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom collections import OrderedDict\n\nfrom mmcv.utils import print_log\n\nf"
  },
  {
    "path": "mmdet/datasets/wider_face.py",
    "chars": 1578,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\n\nf"
  },
  {
    "path": "mmdet/datasets/xml_style.py",
    "chars": 6243,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport os.path as osp\nimport xml.etree.ElementTree as ET\n\nimport mmcv\nim"
  },
  {
    "path": "mmdet/models/__init__.py",
    "chars": 899,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .backbones import *  # noqa: F401,F403\nfrom .builder import (BACKBO"
  },
  {
    "path": "mmdet/models/backbones/__init__.py",
    "chars": 999,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .csp_darknet import CSPDarknet\nfrom .darknet import Darknet\nfrom .d"
  },
  {
    "path": "mmdet/models/backbones/csp_darknet.py",
    "chars": 10543,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import Con"
  },
  {
    "path": "mmdet/models/backbones/darknet.py",
    "chars": 8233,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# Copyright (c) 2019 Western Digital Corporation or its affiliates.\n\nimp"
  },
  {
    "path": "mmdet/models/backbones/detectors_resnet.py",
    "chars": 12736,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.utils.checkpoint as cp\nfrom mmcv.cnn "
  },
  {
    "path": "mmdet/models/backbones/detectors_resnext.py",
    "chars": 3920,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfr"
  },
  {
    "path": "mmdet/models/backbones/efficientnet.py",
    "chars": 16218,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport math\nfrom functools import partial\n\nimport torch\nimpo"
  },
  {
    "path": "mmdet/models/backbones/hourglass.py",
    "chars": 7494,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom mmcv.cnn impo"
  },
  {
    "path": "mmdet/models/backbones/hrnet.py",
    "chars": 23106,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import build_conv_l"
  },
  {
    "path": "mmdet/models/backbones/mobilenet_v2.py",
    "chars": 7599,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nf"
  },
  {
    "path": "mmdet/models/backbones/pvt.py",
    "chars": 23217,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\nimport warnings\n\nimport numpy as np\nimport torch\nimport torc"
  },
  {
    "path": "mmdet/models/backbones/regnet.py",
    "chars": 13605,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch.nn as nn\nfrom mmcv.cnn "
  },
  {
    "path": "mmdet/models/backbones/res2net.py",
    "chars": 11659,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.utils.check"
  },
  {
    "path": "mmdet/models/backbones/resnest.py",
    "chars": 10579,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
  },
  {
    "path": "mmdet/models/backbones/resnet.py",
    "chars": 23840,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nimport torch.utils.checkpoint as "
  },
  {
    "path": "mmdet/models/backbones/resnext.py",
    "chars": 5712,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport math\n\nfrom mmcv.cnn import build_conv_layer, build_norm_layer\n\nfr"
  },
  {
    "path": "mmdet/models/backbones/ssd_vgg.py",
    "chars": 4705,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch.nn as nn\nfrom mmcv.cnn import VGG\nfrom mmc"
  },
  {
    "path": "mmdet/models/backbones/swin.py",
    "chars": 30579,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom collections import OrderedDict\nfrom copy import dee"
  },
  {
    "path": "mmdet/models/backbones/trident_resnet.py",
    "chars": 11129,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimpor"
  },
  {
    "path": "mmdet/models/builder.py",
    "chars": 1449,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nfrom mmcv.cnn import MODELS as MMCV_MODELS\nfrom mmcv.ut"
  },
  {
    "path": "mmdet/models/dense_heads/__init__.py",
    "chars": 2787,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom .anchor_free_head import AnchorFreeHead\nfrom .anchor_head import An"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_free_head.py",
    "chars": 13958,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\nfrom abc import abstractmethod\n\nimport torch\nimport torc"
  },
  {
    "path": "mmdet/models/dense_heads/anchor_head.py",
    "chars": 24605,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner imp"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_anchor_head.py",
    "chars": 18527,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom ...core.bbox.assigners import AscendMaxIoUAssigner\nfr"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_retina_head.py",
    "chars": 5091,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom ..builder import HEADS\nfrom .ascend_anchor_head import AscendAnchor"
  },
  {
    "path": "mmdet/models/dense_heads/ascend_ssd_head.py",
    "chars": 14349,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\nfrom mmcv.runner import for"
  },
  {
    "path": "mmdet/models/dense_heads/atss_head.py",
    "chars": 21149,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scal"
  },
  {
    "path": "mmdet/models/dense_heads/autoassign_head.py",
    "chars": 23096,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.func"
  },
  {
    "path": "mmdet/models/dense_heads/base_dense_head.py",
    "chars": 23226,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nimport torch\nfrom mmcv.cnn.util"
  },
  {
    "path": "mmdet/models/dense_heads/base_mask_head.py",
    "chars": 4539,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom abc import ABCMeta, abstractmethod\n\nfrom mmcv.runner import BaseMod"
  },
  {
    "path": "mmdet/models/dense_heads/cascade_rpn_head.py",
    "chars": 33996,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom __future__ import division\nimport copy\nimport warnings\n\nimport torc"
  },
  {
    "path": "mmdet/models/dense_heads/centernet_head.py",
    "chars": 18025,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import bias_init_with_p"
  },
  {
    "path": "mmdet/models/dense_heads/centripetal_head.py",
    "chars": 19882,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, normal_init\nfrom "
  },
  {
    "path": "mmdet/models/dense_heads/corner_head.py",
    "chars": 48502,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nfrom logging import warning\nfrom math import ceil, log\n\nimport torch\nimp"
  },
  {
    "path": "mmdet/models/dense_heads/ddod_head.py",
    "chars": 34312,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scal"
  },
  {
    "path": "mmdet/models/dense_heads/deformable_detr_head.py",
    "chars": 13708,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
  },
  {
    "path": "mmdet/models/dense_heads/dense_test_mixins.py",
    "chars": 8421,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport sys\nfrom inspect import signature\n\nimport torch\nfrom mmcv.ops imp"
  },
  {
    "path": "mmdet/models/dense_heads/detr_head.py",
    "chars": 39707,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom "
  },
  {
    "path": "mmdet/models/dense_heads/embedding_rpn_head.py",
    "chars": 4629,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nfrom mmcv.runner import BaseModule\n\nf"
  },
  {
    "path": "mmdet/models/dense_heads/fcos_head.py",
    "chars": 19709,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import"
  },
  {
    "path": "mmdet/models/dense_heads/fovea_head.py",
    "chars": 16364,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn import"
  },
  {
    "path": "mmdet/models/dense_heads/free_anchor_retina_head.py",
    "chars": 11254,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn.functional as F\n\nfrom mmdet.core import bbo"
  },
  {
    "path": "mmdet/models/dense_heads/fsaf_head.py",
    "chars": 19337,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom"
  },
  {
    "path": "mmdet/models/dense_heads/ga_retina_head.py",
    "chars": 3931,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\nfrom mmcv.ops impo"
  },
  {
    "path": "mmdet/models/dense_heads/ga_rpn_head.py",
    "chars": 7052,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport t"
  },
  {
    "path": "mmdet/models/dense_heads/gfl_head.py",
    "chars": 27913,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom "
  },
  {
    "path": "mmdet/models/dense_heads/guided_anchor_head.py",
    "chars": 37334,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nfrom mmcv.ops import"
  },
  {
    "path": "mmdet/models/dense_heads/lad_head.py",
    "chars": 10058,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import "
  },
  {
    "path": "mmdet/models/dense_heads/ld_head.py",
    "chars": 10636,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import "
  },
  {
    "path": "mmdet/models/dense_heads/mask2former_head.py",
    "chars": 19469,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
  },
  {
    "path": "mmdet/models/dense_heads/maskformer_head.py",
    "chars": 25113,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom "
  },
  {
    "path": "mmdet/models/dense_heads/nasfcos_head.py",
    "chars": 2908,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, Scal"
  },
  {
    "path": "mmdet/models/dense_heads/paa_head.py",
    "chars": 34042,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom"
  },
  {
    "path": "mmdet/models/dense_heads/pisa_retinanet_head.py",
    "chars": 6267,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\nfrom mmcv.runner import force_fp32\n\nfrom mmdet.core import "
  },
  {
    "path": "mmdet/models/dense_heads/pisa_ssd_head.py",
    "chars": 5598,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch\n\nfrom mmdet.core import multi_apply\nfrom ..builder import H"
  },
  {
    "path": "mmdet/models/dense_heads/reppoints_head.py",
    "chars": 34936,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom mmcv.cnn impo"
  },
  {
    "path": "mmdet/models/dense_heads/retina_head.py",
    "chars": 4059,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule\n\nfrom ..builder im"
  },
  {
    "path": "mmdet/models/dense_heads/retina_sepbn_head.py",
    "chars": 4566,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport torch.nn as nn\nfrom mmcv.cnn import ConvModule, bias_init_with_pr"
  },
  {
    "path": "mmdet/models/dense_heads/rpn_head.py",
    "chars": 11202,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport copy\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.function"
  },
  {
    "path": "mmdet/models/dense_heads/sabl_retina_head.py",
    "chars": 27410,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nf"
  },
  {
    "path": "mmdet/models/dense_heads/solo_head.py",
    "chars": 48058,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport mmcv\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport"
  },
  {
    "path": "mmdet/models/dense_heads/solov2_head.py",
    "chars": 31361,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport mmcv\nimport numpy as np\nimport torch\nimport torc"
  },
  {
    "path": "mmdet/models/dense_heads/ssd_head.py",
    "chars": 14790,
    "preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport warnings\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.func"
  }
]

// ... and 283 more files (download for full content)

About this extraction

This page contains the full source code of the RockeyCoss/Prompt-Segment-Anything GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 483 files (3.8 MB), approximately 1.0M tokens, and a symbol index of 3,240 extracted functions, classes, methods, constants, and types. Use it with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.

Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.

Copied to clipboard!