Showing preview only (6,482K chars total). Download the full file or copy to clipboard to get everything.
Repository: 0ssamaak0/DLTA-AI
Branch: master
Commit: 9296b693d28c
Files: 1415
Total size: 5.9 MB
Directory structure:
gitextract_auoxlo5f/
├── .github/
│ └── workflows/
│ └── retype-action.yml
├── .gitignore
├── DLTA_AI_app/
│ ├── .flake8
│ ├── .gitignore
│ ├── .gitmodules
│ ├── __main__.py
│ ├── __main__.spec
│ ├── inferencing.py
│ ├── labelme/
│ │ ├── __init__.py
│ │ ├── app.py
│ │ ├── cli/
│ │ │ ├── __init__.py
│ │ │ ├── draw_json.py
│ │ │ ├── draw_label_png.py
│ │ │ ├── json_to_dataset.py
│ │ │ └── on_docker.py
│ │ ├── config/
│ │ │ ├── __init__.py
│ │ │ ├── default_config.yaml
│ │ │ └── default_config_base.yaml
│ │ ├── intelligence.py
│ │ ├── label_file.py
│ │ ├── logger.py
│ │ ├── shape.py
│ │ ├── testing.py
│ │ ├── utils/
│ │ │ ├── __init__.py
│ │ │ ├── _io.py
│ │ │ ├── custom_exports.py
│ │ │ ├── export.py
│ │ │ ├── helpers/
│ │ │ │ ├── mathOps.py
│ │ │ │ └── visualizations.py
│ │ │ ├── image.py
│ │ │ ├── model_explorer.py
│ │ │ ├── qt.py
│ │ │ ├── sam.py
│ │ │ ├── shape.py
│ │ │ └── vid_to_frames.py
│ │ └── widgets/
│ │ ├── ClassesWidget.py
│ │ ├── MsgBox.py
│ │ ├── ThresholdWidget.py
│ │ ├── __init__.py
│ │ ├── brightness_contrast_dialog.py
│ │ ├── canvas.py
│ │ ├── check_updates_UI.py
│ │ ├── color_dialog.py
│ │ ├── deleteSelectedShape_UI.py
│ │ ├── editLabel_videoMode.py
│ │ ├── escapable_qlist_widget.py
│ │ ├── exportData_UI.py
│ │ ├── feedback_UI.py
│ │ ├── getIDfromUser_UI.py
│ │ ├── interpolation_UI.py
│ │ ├── label_dialog.py
│ │ ├── label_list_widget.py
│ │ ├── links.py
│ │ ├── merge_feature_UI.py
│ │ ├── notification.py
│ │ ├── open_file.py
│ │ ├── preferences_UI.py
│ │ ├── runtime_data_UI.py
│ │ ├── scaleObject_UI.py
│ │ ├── segmentation_options_UI.py
│ │ ├── shortcut_selector_UI.py
│ │ ├── tool_bar.py
│ │ ├── unique_label_qlist_widget.py
│ │ └── zoom_widget.py
│ ├── mmdetection/
│ │ ├── .circleci/
│ │ │ └── config.yml
│ │ ├── .dev_scripts/
│ │ │ ├── batch_test_list.py
│ │ │ ├── batch_train_list.txt
│ │ │ ├── benchmark_filter.py
│ │ │ ├── benchmark_inference_fps.py
│ │ │ ├── benchmark_test_image.py
│ │ │ ├── check_links.py
│ │ │ ├── convert_test_benchmark_script.py
│ │ │ ├── convert_train_benchmark_script.py
│ │ │ ├── gather_models.py
│ │ │ ├── gather_test_benchmark_metric.py
│ │ │ ├── gather_train_benchmark_metric.py
│ │ │ ├── linter.sh
│ │ │ ├── test_benchmark.sh
│ │ │ ├── test_init_backbone.py
│ │ │ └── train_benchmark.sh
│ │ ├── .gitignore
│ │ ├── .owners.yml
│ │ ├── .pre-commit-config.yaml
│ │ ├── .readthedocs.yml
│ │ ├── CITATION.cff
│ │ ├── LICENSE
│ │ ├── MANIFEST.in
│ │ ├── configs/
│ │ │ ├── _base_/
│ │ │ │ ├── datasets/
│ │ │ │ │ ├── cityscapes_detection.py
│ │ │ │ │ ├── cityscapes_instance.py
│ │ │ │ │ ├── coco_detection.py
│ │ │ │ │ ├── coco_instance.py
│ │ │ │ │ ├── coco_instance_semantic.py
│ │ │ │ │ ├── coco_panoptic.py
│ │ │ │ │ ├── deepfashion.py
│ │ │ │ │ ├── lvis_v0.5_instance.py
│ │ │ │ │ ├── lvis_v1_instance.py
│ │ │ │ │ ├── openimages_detection.py
│ │ │ │ │ ├── voc0712.py
│ │ │ │ │ └── wider_face.py
│ │ │ │ ├── default_runtime.py
│ │ │ │ ├── models/
│ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn.py
│ │ │ │ │ ├── cascade_rcnn_r50_fpn.py
│ │ │ │ │ ├── fast_rcnn_r50_fpn.py
│ │ │ │ │ ├── faster_rcnn_r50_caffe_c4.py
│ │ │ │ │ ├── faster_rcnn_r50_caffe_dc5.py
│ │ │ │ │ ├── faster_rcnn_r50_fpn.py
│ │ │ │ │ ├── mask_rcnn_r50_caffe_c4.py
│ │ │ │ │ ├── mask_rcnn_r50_fpn.py
│ │ │ │ │ ├── retinanet_r50_fpn.py
│ │ │ │ │ ├── rpn_r50_caffe_c4.py
│ │ │ │ │ ├── rpn_r50_fpn.py
│ │ │ │ │ └── ssd300.py
│ │ │ │ └── schedules/
│ │ │ │ ├── schedule_1x.py
│ │ │ │ ├── schedule_20e.py
│ │ │ │ └── schedule_2x.py
│ │ │ ├── albu_example/
│ │ │ │ └── mask_rcnn_r50_fpn_albu_1x_coco.py
│ │ │ ├── atss/
│ │ │ │ ├── atss_r101_fpn_1x_coco.py
│ │ │ │ ├── atss_r50_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── autoassign/
│ │ │ │ ├── autoassign_r50_fpn_8x2_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── carafe/
│ │ │ │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_carafe_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── cascade_rcnn/
│ │ │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r101_fpn_20e_coco.py
│ │ │ │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r50_fpn_20e_coco.py
│ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py
│ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_20e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── cascade_rpn/
│ │ │ │ ├── crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── crpn_r50_caffe_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── centernet/
│ │ │ │ ├── centernet_resnet18_140e_coco.py
│ │ │ │ ├── centernet_resnet18_dcnv2_140e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── centripetalnet/
│ │ │ │ ├── centripetalnet_hourglass104_mstest_16x6_210e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── cityscapes/
│ │ │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py
│ │ │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py
│ │ │ ├── common/
│ │ │ │ ├── lsj_100e_coco_instance.py
│ │ │ │ ├── mstrain-poly_3x_coco_instance.py
│ │ │ │ ├── mstrain_3x_coco.py
│ │ │ │ ├── mstrain_3x_coco_instance.py
│ │ │ │ ├── ssj_270k_coco_instance.py
│ │ │ │ └── ssj_scp_270k_coco_instance.py
│ │ │ ├── convnext/
│ │ │ │ ├── cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py
│ │ │ │ ├── mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── cornernet/
│ │ │ │ ├── cornernet_hourglass104_mstest_10x5_210e_coco.py
│ │ │ │ ├── cornernet_hourglass104_mstest_32x3_210e_coco.py
│ │ │ │ ├── cornernet_hourglass104_mstest_8x6_210e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── dcn/
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── dcnv2/
│ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── ddod/
│ │ │ │ ├── ddod_r50_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── deepfashion/
│ │ │ │ └── mask_rcnn_r50_fpn_15e_deepfashion.py
│ │ │ ├── deformable_detr/
│ │ │ │ ├── deformable_detr_r50_16x2_50e_coco.py
│ │ │ │ ├── deformable_detr_refine_r50_16x2_50e_coco.py
│ │ │ │ ├── deformable_detr_twostage_refine_r50_16x2_50e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── detectors/
│ │ │ │ ├── cascade_rcnn_r50_rfp_1x_coco.py
│ │ │ │ ├── cascade_rcnn_r50_sac_1x_coco.py
│ │ │ │ ├── detectors_cascade_rcnn_r50_1x_coco.py
│ │ │ │ ├── detectors_htc_r101_20e_coco.py
│ │ │ │ ├── detectors_htc_r50_1x_coco.py
│ │ │ │ ├── htc_r50_rfp_1x_coco.py
│ │ │ │ ├── htc_r50_sac_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── detr/
│ │ │ │ ├── detr_r50_8x2_150e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── double_heads/
│ │ │ │ ├── dh_faster_rcnn_r50_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── dyhead/
│ │ │ │ ├── atss_r50_caffe_fpn_dyhead_1x_coco.py
│ │ │ │ ├── atss_r50_fpn_dyhead_1x_coco.py
│ │ │ │ ├── atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── dynamic_rcnn/
│ │ │ │ ├── dynamic_rcnn_r50_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── efficientnet/
│ │ │ │ ├── metafile.yml
│ │ │ │ └── retinanet_effb3_fpn_crop896_8x4_1x_coco.py
│ │ │ ├── empirical_attention/
│ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── fast_rcnn/
│ │ │ │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── fast_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── fast_rcnn_r101_fpn_2x_coco.py
│ │ │ │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── fast_rcnn_r50_fpn_1x_coco.py
│ │ │ │ └── fast_rcnn_r50_fpn_2x_coco.py
│ │ │ ├── faster_rcnn/
│ │ │ │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_r101_fpn_2x_coco.py
│ │ │ │ ├── faster_rcnn_r101_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_dc5_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_90k_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_2x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_ciou_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_2x_coco.py
│ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── fcos/
│ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py
│ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py
│ │ │ │ ├── fcos_center_r50_caffe_fpn_gn-head_1x_coco.py
│ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_1x_coco.py
│ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_1x_coco.py
│ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py
│ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py
│ │ │ │ ├── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── foveabox/
│ │ │ │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py
│ │ │ │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│ │ │ │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py
│ │ │ │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py
│ │ │ │ ├── fovea_r101_fpn_4x4_1x_coco.py
│ │ │ │ ├── fovea_r101_fpn_4x4_2x_coco.py
│ │ │ │ ├── fovea_r50_fpn_4x4_1x_coco.py
│ │ │ │ ├── fovea_r50_fpn_4x4_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── fpg/
│ │ │ │ ├── faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpg_crop640_50e_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_crop640_50e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpg_crop640_50e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_crop640_50e_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_r50_fpg-chn128_crop640_50e_coco.py
│ │ │ │ └── retinanet_r50_fpg_crop640_50e_coco.py
│ │ │ ├── free_anchor/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py
│ │ │ │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py
│ │ │ │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py
│ │ │ ├── fsaf/
│ │ │ │ ├── fsaf_r101_fpn_1x_coco.py
│ │ │ │ ├── fsaf_r50_fpn_1x_coco.py
│ │ │ │ ├── fsaf_x101_64x4d_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── gcnet/
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── gfl/
│ │ │ │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── gfl_r101_fpn_mstrain_2x_coco.py
│ │ │ │ ├── gfl_r50_fpn_1x_coco.py
│ │ │ │ ├── gfl_r50_fpn_mstrain_2x_coco.py
│ │ │ │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
│ │ │ │ ├── gfl_x101_32x4d_fpn_mstrain_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── ghm/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_ghm_r101_fpn_1x_coco.py
│ │ │ │ ├── retinanet_ghm_r50_fpn_1x_coco.py
│ │ │ │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py
│ │ │ │ └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py
│ │ │ ├── gn/
│ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── gn+ws/
│ │ │ │ ├── faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py
│ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py
│ │ │ │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py
│ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py
│ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── grid_rcnn/
│ │ │ │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py
│ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py
│ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py
│ │ │ │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py
│ │ │ │ ├── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── groie/
│ │ │ │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py
│ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── guided_anchoring/
│ │ │ │ ├── ga_fast_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_faster_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_faster_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_faster_r50_fpn_1x_coco.py
│ │ │ │ ├── ga_faster_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── ga_retinanet_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py
│ │ │ │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_retinanet_r50_fpn_1x_coco.py
│ │ │ │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── ga_rpn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── ga_rpn_r50_fpn_1x_coco.py
│ │ │ │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── ga_rpn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── hrnet/
│ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py
│ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py
│ │ │ │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py
│ │ │ │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py
│ │ │ │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py
│ │ │ │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py
│ │ │ │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py
│ │ │ │ ├── htc_hrnetv2p_w18_20e_coco.py
│ │ │ │ ├── htc_hrnetv2p_w32_20e_coco.py
│ │ │ │ ├── htc_hrnetv2p_w40_20e_coco.py
│ │ │ │ ├── htc_hrnetv2p_w40_28e_coco.py
│ │ │ │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w18_1x_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py
│ │ │ │ ├── mask_rcnn_hrnetv2p_w40_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── htc/
│ │ │ │ ├── htc_r101_fpn_20e_coco.py
│ │ │ │ ├── htc_r50_fpn_1x_coco.py
│ │ │ │ ├── htc_r50_fpn_20e_coco.py
│ │ │ │ ├── htc_without_semantic_r50_fpn_1x_coco.py
│ │ │ │ ├── htc_x101_32x4d_fpn_16x1_20e_coco.py
│ │ │ │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py
│ │ │ │ ├── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── instaboost/
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py
│ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── lad/
│ │ │ │ ├── lad_r101_paa_r50_fpn_coco_1x.py
│ │ │ │ ├── lad_r50_paa_r101_fpn_coco_1x.py
│ │ │ │ └── metafile.yml
│ │ │ ├── ld/
│ │ │ │ ├── ld_r101_gflv1_r101dcn_fpn_coco_2x.py
│ │ │ │ ├── ld_r18_gflv1_r101_fpn_coco_1x.py
│ │ │ │ ├── ld_r34_gflv1_r101_fpn_coco_1x.py
│ │ │ │ ├── ld_r50_gflv1_r101_fpn_coco_1x.py
│ │ │ │ └── metafile.yml
│ │ │ ├── legacy_1.x/
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py
│ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py
│ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py
│ │ │ │ ├── retinanet_r50_fpn_1x_coco_v1.py
│ │ │ │ └── ssd300_coco_v1.py
│ │ │ ├── libra_rcnn/
│ │ │ │ ├── libra_fast_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── libra_retinanet_r50_fpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── lvis/
│ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py
│ │ │ │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py
│ │ │ ├── mask2former/
│ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco.py
│ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco.py
│ │ │ │ ├── mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py
│ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py
│ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py
│ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── mask_rcnn/
│ │ │ │ ├── mask_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_2x_coco.py
│ │ │ │ ├── mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_1x_wandb_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_2x_coco.py
│ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── maskformer/
│ │ │ │ ├── maskformer_r50_mstrain_16x1_75e_coco.py
│ │ │ │ ├── maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── ms_rcnn/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py
│ │ │ │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py
│ │ │ │ ├── ms_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ └── ms_rcnn_x101_64x4d_fpn_2x_coco.py
│ │ │ ├── nas_fcos/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│ │ │ │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
│ │ │ ├── nas_fpn/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_r50_fpn_crop640_50e_coco.py
│ │ │ │ └── retinanet_r50_nasfpn_crop640_50e_coco.py
│ │ │ ├── openimages/
│ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages.py
│ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py
│ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py
│ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_r50_fpn_32x2_1x_openimages.py
│ │ │ │ └── ssd300_32x8_36e_openimages.py
│ │ │ ├── paa/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── paa_r101_fpn_1x_coco.py
│ │ │ │ ├── paa_r101_fpn_2x_coco.py
│ │ │ │ ├── paa_r101_fpn_mstrain_3x_coco.py
│ │ │ │ ├── paa_r50_fpn_1.5x_coco.py
│ │ │ │ ├── paa_r50_fpn_1x_coco.py
│ │ │ │ ├── paa_r50_fpn_2x_coco.py
│ │ │ │ └── paa_r50_fpn_mstrain_3x_coco.py
│ │ │ ├── pafpn/
│ │ │ │ ├── faster_rcnn_r50_pafpn_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── panoptic_fpn/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── panoptic_fpn_r101_fpn_1x_coco.py
│ │ │ │ ├── panoptic_fpn_r101_fpn_mstrain_3x_coco.py
│ │ │ │ ├── panoptic_fpn_r50_fpn_1x_coco.py
│ │ │ │ └── panoptic_fpn_r50_fpn_mstrain_3x_coco.py
│ │ │ ├── pascal_voc/
│ │ │ │ ├── faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py
│ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py
│ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py
│ │ │ │ ├── retinanet_r50_fpn_1x_voc0712.py
│ │ │ │ ├── ssd300_voc0712.py
│ │ │ │ └── ssd512_voc0712.py
│ │ │ ├── pisa/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── pisa_retinanet_r50_fpn_1x_coco.py
│ │ │ │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── pisa_ssd300_coco.py
│ │ │ │ └── pisa_ssd512_coco.py
│ │ │ ├── point_rend/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py
│ │ │ │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py
│ │ │ ├── pvt/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_pvt-l_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvt-m_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvt-s_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvt-t_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvtv2-b0_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvtv2-b1_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvtv2-b2_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvtv2-b3_fpn_1x_coco.py
│ │ │ │ ├── retinanet_pvtv2-b4_fpn_1x_coco.py
│ │ │ │ └── retinanet_pvtv2-b5_fpn_1x_coco.py
│ │ │ ├── queryinst/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│ │ │ │ ├── queryinst_r101_fpn_mstrain_480-800_3x_coco.py
│ │ │ │ ├── queryinst_r50_fpn_1x_coco.py
│ │ │ │ ├── queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│ │ │ │ └── queryinst_r50_fpn_mstrain_480-800_3x_coco.py
│ │ │ ├── regnet/
│ │ │ │ ├── cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
│ │ │ │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py
│ │ │ │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py
│ │ │ │ └── retinanet_regnetx-800MF_fpn_1x_coco.py
│ │ │ ├── reppoints/
│ │ │ │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
│ │ │ │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
│ │ │ │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│ │ │ │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
│ │ │ │ ├── reppoints_moment_r50_fpn_1x_coco.py
│ │ │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
│ │ │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
│ │ │ │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
│ │ │ │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
│ │ │ ├── res2net/
│ │ │ │ ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py
│ │ │ │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py
│ │ │ │ ├── faster_rcnn_r2_101_fpn_2x_coco.py
│ │ │ │ ├── htc_r2_101_fpn_20e_coco.py
│ │ │ │ ├── mask_rcnn_r2_101_fpn_2x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── resnest/
│ │ │ │ ├── cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│ │ │ │ ├── cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│ │ │ │ ├── cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│ │ │ │ ├── cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│ │ │ │ ├── faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│ │ │ │ ├── faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py
│ │ │ │ ├── mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│ │ │ │ ├── mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── resnet_strikes_back/
│ │ │ │ ├── cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
│ │ │ │ ├── faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ └── retinanet_r50_fpn_rsb-pretrain_1x_coco.py
│ │ │ ├── retinanet/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── retinanet_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── retinanet_r101_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── retinanet_r101_fpn_1x_coco.py
│ │ │ │ ├── retinanet_r101_fpn_2x_coco.py
│ │ │ │ ├── retinanet_r101_fpn_mstrain_640-800_3x_coco.py
│ │ │ │ ├── retinanet_r18_fpn_1x8_1x_coco.py
│ │ │ │ ├── retinanet_r18_fpn_1x_coco.py
│ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py
│ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_2x_coco.py
│ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py
│ │ │ │ ├── retinanet_r50_fpn_1x_coco.py
│ │ │ │ ├── retinanet_r50_fpn_2x_coco.py
│ │ │ │ ├── retinanet_r50_fpn_90k_coco.py
│ │ │ │ ├── retinanet_r50_fpn_fp16_1x_coco.py
│ │ │ │ ├── retinanet_r50_fpn_mstrain_640-800_3x_coco.py
│ │ │ │ ├── retinanet_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── retinanet_x101_32x4d_fpn_2x_coco.py
│ │ │ │ ├── retinanet_x101_64x4d_fpn_1x_coco.py
│ │ │ │ ├── retinanet_x101_64x4d_fpn_2x_coco.py
│ │ │ │ └── retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py
│ │ │ ├── rpn/
│ │ │ │ ├── rpn_r101_caffe_fpn_1x_coco.py
│ │ │ │ ├── rpn_r101_fpn_1x_coco.py
│ │ │ │ ├── rpn_r101_fpn_2x_coco.py
│ │ │ │ ├── rpn_r50_caffe_c4_1x_coco.py
│ │ │ │ ├── rpn_r50_caffe_fpn_1x_coco.py
│ │ │ │ ├── rpn_r50_fpn_1x_coco.py
│ │ │ │ ├── rpn_r50_fpn_2x_coco.py
│ │ │ │ ├── rpn_x101_32x4d_fpn_1x_coco.py
│ │ │ │ ├── rpn_x101_32x4d_fpn_2x_coco.py
│ │ │ │ ├── rpn_x101_64x4d_fpn_1x_coco.py
│ │ │ │ └── rpn_x101_64x4d_fpn_2x_coco.py
│ │ │ ├── sabl/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── sabl_faster_rcnn_r101_fpn_1x_coco.py
│ │ │ │ ├── sabl_faster_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── sabl_retinanet_r101_fpn_1x_coco.py
│ │ │ │ ├── sabl_retinanet_r101_fpn_gn_1x_coco.py
│ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py
│ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py
│ │ │ │ ├── sabl_retinanet_r50_fpn_1x_coco.py
│ │ │ │ └── sabl_retinanet_r50_fpn_gn_1x_coco.py
│ │ │ ├── scnet/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── scnet_r101_fpn_20e_coco.py
│ │ │ │ ├── scnet_r50_fpn_1x_coco.py
│ │ │ │ ├── scnet_r50_fpn_20e_coco.py
│ │ │ │ ├── scnet_x101_64x4d_fpn_20e_coco.py
│ │ │ │ └── scnet_x101_64x4d_fpn_8x1_20e_coco.py
│ │ │ ├── scratch/
│ │ │ │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── seesaw_loss/
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
│ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
│ │ │ │ └── metafile.yml
│ │ │ ├── selfsup_pretrain/
│ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py
│ │ │ │ └── mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py
│ │ │ ├── simple_copy_paste/
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py
│ │ │ │ └── metafile.yml
│ │ │ ├── solo/
│ │ │ │ ├── decoupled_solo_light_r50_fpn_3x_coco.py
│ │ │ │ ├── decoupled_solo_r50_fpn_1x_coco.py
│ │ │ │ ├── decoupled_solo_r50_fpn_3x_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── solo_r50_fpn_1x_coco.py
│ │ │ │ └── solo_r50_fpn_3x_coco.py
│ │ │ ├── solov2/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── solov2_light_r18_fpn_3x_coco.py
│ │ │ │ ├── solov2_light_r34_fpn_3x_coco.py
│ │ │ │ ├── solov2_light_r50_dcn_fpn_3x_coco.py
│ │ │ │ ├── solov2_light_r50_fpn_3x_coco.py
│ │ │ │ ├── solov2_r101_dcn_fpn_3x_coco.py
│ │ │ │ ├── solov2_r101_fpn_3x_coco.py
│ │ │ │ ├── solov2_r50_fpn_1x_coco.py
│ │ │ │ ├── solov2_r50_fpn_3x_coco.py
│ │ │ │ └── solov2_x101_dcn_fpn_3x_coco.py
│ │ │ ├── sparse_rcnn/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│ │ │ │ ├── sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
│ │ │ │ ├── sparse_rcnn_r50_fpn_1x_coco.py
│ │ │ │ ├── sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
│ │ │ │ └── sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py
│ │ │ ├── ssd/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── ssd300_coco.py
│ │ │ │ ├── ssd300_fp16_coco.py
│ │ │ │ ├── ssd512_coco.py
│ │ │ │ ├── ssd512_fp16_coco.py
│ │ │ │ └── ssdlite_mobilenetv2_scratch_600e_coco.py
│ │ │ ├── strong_baselines/
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
│ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
│ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
│ │ │ │ └── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py
│ │ │ ├── swin/
│ │ │ │ ├── mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
│ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py
│ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
│ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py
│ │ │ │ ├── metafile.yml
│ │ │ │ └── retinanet_swin-t-p4-w7_fpn_1x_coco.py
│ │ │ ├── timm_example/
│ │ │ │ ├── retinanet_timm_efficientnet_b1_fpn_1x_coco.py
│ │ │ │ └── retinanet_timm_tv_resnet50_fpn_1x_coco.py
│ │ │ ├── tood/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── tood_r101_fpn_mstrain_2x_coco.py
│ │ │ │ ├── tood_r50_fpn_1x_coco.py
│ │ │ │ ├── tood_r50_fpn_anchor_based_1x_coco.py
│ │ │ │ ├── tood_r50_fpn_mstrain_2x_coco.py
│ │ │ │ ├── tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
│ │ │ │ └── tood_x101_64x4d_fpn_mstrain_2x_coco.py
│ │ │ ├── tridentnet/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── tridentnet_r50_caffe_1x_coco.py
│ │ │ │ ├── tridentnet_r50_caffe_mstrain_1x_coco.py
│ │ │ │ └── tridentnet_r50_caffe_mstrain_3x_coco.py
│ │ │ ├── vfnet/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── vfnet_r101_fpn_1x_coco.py
│ │ │ │ ├── vfnet_r101_fpn_2x_coco.py
│ │ │ │ ├── vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_r101_fpn_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_r2_101_fpn_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_r50_fpn_1x_coco.py
│ │ │ │ ├── vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_r50_fpn_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
│ │ │ │ ├── vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
│ │ │ │ └── vfnet_x101_64x4d_fpn_mstrain_2x_coco.py
│ │ │ ├── wider_face/
│ │ │ │ └── ssd300_wider_face.py
│ │ │ ├── yolact/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── yolact_r101_1x8_coco.py
│ │ │ │ ├── yolact_r50_1x8_coco.py
│ │ │ │ └── yolact_r50_8x8_coco.py
│ │ │ ├── yolo/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── yolov3_d53_320_273e_coco.py
│ │ │ │ ├── yolov3_d53_fp16_mstrain-608_273e_coco.py
│ │ │ │ ├── yolov3_d53_mstrain-416_273e_coco.py
│ │ │ │ ├── yolov3_d53_mstrain-608_273e_coco.py
│ │ │ │ ├── yolov3_mobilenetv2_320_300e_coco.py
│ │ │ │ └── yolov3_mobilenetv2_mstrain-416_300e_coco.py
│ │ │ ├── yolof/
│ │ │ │ ├── metafile.yml
│ │ │ │ ├── yolof_r50_c5_8x8_1x_coco.py
│ │ │ │ └── yolof_r50_c5_8x8_iter-1x_coco.py
│ │ │ └── yolox/
│ │ │ ├── metafile.yml
│ │ │ ├── yolox_l_8x8_300e_coco.py
│ │ │ ├── yolox_m_8x8_300e_coco.py
│ │ │ ├── yolox_nano_8x8_300e_coco.py
│ │ │ ├── yolox_s_8x8_300e_coco.py
│ │ │ ├── yolox_tiny_8x8_300e_coco.py
│ │ │ └── yolox_x_8x8_300e_coco.py
│ │ ├── docs/
│ │ │ ├── en/
│ │ │ │ ├── Makefile
│ │ │ │ ├── _static/
│ │ │ │ │ └── css/
│ │ │ │ │ └── readthedocs.css
│ │ │ │ ├── api.rst
│ │ │ │ ├── conf.py
│ │ │ │ ├── index.rst
│ │ │ │ ├── make.bat
│ │ │ │ ├── stat.py
│ │ │ │ └── tutorials/
│ │ │ │ └── index.rst
│ │ │ └── zh_cn/
│ │ │ ├── Makefile
│ │ │ ├── _static/
│ │ │ │ └── css/
│ │ │ │ └── readthedocs.css
│ │ │ ├── api.rst
│ │ │ ├── conf.py
│ │ │ ├── index.rst
│ │ │ ├── make.bat
│ │ │ ├── stat.py
│ │ │ └── tutorials/
│ │ │ └── index.rst
│ │ ├── mmdet/
│ │ │ ├── __init__.py
│ │ │ ├── apis/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── inference.py
│ │ │ │ ├── test.py
│ │ │ │ └── train.py
│ │ │ ├── core/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── anchor/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── anchor_generator.py
│ │ │ │ │ ├── builder.py
│ │ │ │ │ ├── point_generator.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── bbox/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── assigners/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── approx_max_iou_assigner.py
│ │ │ │ │ │ ├── assign_result.py
│ │ │ │ │ │ ├── atss_assigner.py
│ │ │ │ │ │ ├── base_assigner.py
│ │ │ │ │ │ ├── center_region_assigner.py
│ │ │ │ │ │ ├── grid_assigner.py
│ │ │ │ │ │ ├── hungarian_assigner.py
│ │ │ │ │ │ ├── mask_hungarian_assigner.py
│ │ │ │ │ │ ├── max_iou_assigner.py
│ │ │ │ │ │ ├── point_assigner.py
│ │ │ │ │ │ ├── region_assigner.py
│ │ │ │ │ │ ├── sim_ota_assigner.py
│ │ │ │ │ │ ├── task_aligned_assigner.py
│ │ │ │ │ │ └── uniform_assigner.py
│ │ │ │ │ ├── builder.py
│ │ │ │ │ ├── coder/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── base_bbox_coder.py
│ │ │ │ │ │ ├── bucketing_bbox_coder.py
│ │ │ │ │ │ ├── delta_xywh_bbox_coder.py
│ │ │ │ │ │ ├── distance_point_bbox_coder.py
│ │ │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py
│ │ │ │ │ │ ├── pseudo_bbox_coder.py
│ │ │ │ │ │ ├── tblr_bbox_coder.py
│ │ │ │ │ │ └── yolo_bbox_coder.py
│ │ │ │ │ ├── demodata.py
│ │ │ │ │ ├── iou_calculators/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── builder.py
│ │ │ │ │ │ └── iou2d_calculator.py
│ │ │ │ │ ├── match_costs/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── builder.py
│ │ │ │ │ │ └── match_cost.py
│ │ │ │ │ ├── samplers/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── base_sampler.py
│ │ │ │ │ │ ├── combined_sampler.py
│ │ │ │ │ │ ├── instance_balanced_pos_sampler.py
│ │ │ │ │ │ ├── iou_balanced_neg_sampler.py
│ │ │ │ │ │ ├── mask_pseudo_sampler.py
│ │ │ │ │ │ ├── mask_sampling_result.py
│ │ │ │ │ │ ├── ohem_sampler.py
│ │ │ │ │ │ ├── pseudo_sampler.py
│ │ │ │ │ │ ├── random_sampler.py
│ │ │ │ │ │ ├── sampling_result.py
│ │ │ │ │ │ └── score_hlr_sampler.py
│ │ │ │ │ └── transforms.py
│ │ │ │ ├── data_structures/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── general_data.py
│ │ │ │ │ └── instance_data.py
│ │ │ │ ├── evaluation/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── bbox_overlaps.py
│ │ │ │ │ ├── class_names.py
│ │ │ │ │ ├── eval_hooks.py
│ │ │ │ │ ├── mean_ap.py
│ │ │ │ │ ├── panoptic_utils.py
│ │ │ │ │ └── recall.py
│ │ │ │ ├── export/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── model_wrappers.py
│ │ │ │ │ ├── onnx_helper.py
│ │ │ │ │ └── pytorch2onnx.py
│ │ │ │ ├── hook/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── checkloss_hook.py
│ │ │ │ │ ├── ema.py
│ │ │ │ │ ├── memory_profiler_hook.py
│ │ │ │ │ ├── set_epoch_info_hook.py
│ │ │ │ │ ├── sync_norm_hook.py
│ │ │ │ │ ├── sync_random_size_hook.py
│ │ │ │ │ ├── wandblogger_hook.py
│ │ │ │ │ ├── yolox_lrupdater_hook.py
│ │ │ │ │ └── yolox_mode_switch_hook.py
│ │ │ │ ├── mask/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── mask_target.py
│ │ │ │ │ ├── structures.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── optimizers/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── builder.py
│ │ │ │ │ └── layer_decay_optimizer_constructor.py
│ │ │ │ ├── post_processing/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── bbox_nms.py
│ │ │ │ │ ├── matrix_nms.py
│ │ │ │ │ └── merge_augs.py
│ │ │ │ ├── utils/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── dist_utils.py
│ │ │ │ │ └── misc.py
│ │ │ │ └── visualization/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── image.py
│ │ │ │ └── palette.py
│ │ │ ├── models/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── backbones/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── csp_darknet.py
│ │ │ │ │ ├── darknet.py
│ │ │ │ │ ├── detectors_resnet.py
│ │ │ │ │ ├── detectors_resnext.py
│ │ │ │ │ ├── efficientnet.py
│ │ │ │ │ ├── hourglass.py
│ │ │ │ │ ├── hrnet.py
│ │ │ │ │ ├── mobilenet_v2.py
│ │ │ │ │ ├── pvt.py
│ │ │ │ │ ├── regnet.py
│ │ │ │ │ ├── res2net.py
│ │ │ │ │ ├── resnest.py
│ │ │ │ │ ├── resnet.py
│ │ │ │ │ ├── resnext.py
│ │ │ │ │ ├── ssd_vgg.py
│ │ │ │ │ ├── swin.py
│ │ │ │ │ └── trident_resnet.py
│ │ │ │ ├── builder.py
│ │ │ │ ├── dense_heads/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── anchor_free_head.py
│ │ │ │ │ ├── anchor_head.py
│ │ │ │ │ ├── atss_head.py
│ │ │ │ │ ├── autoassign_head.py
│ │ │ │ │ ├── base_dense_head.py
│ │ │ │ │ ├── base_mask_head.py
│ │ │ │ │ ├── cascade_rpn_head.py
│ │ │ │ │ ├── centernet_head.py
│ │ │ │ │ ├── centripetal_head.py
│ │ │ │ │ ├── corner_head.py
│ │ │ │ │ ├── ddod_head.py
│ │ │ │ │ ├── deformable_detr_head.py
│ │ │ │ │ ├── dense_test_mixins.py
│ │ │ │ │ ├── detr_head.py
│ │ │ │ │ ├── embedding_rpn_head.py
│ │ │ │ │ ├── fcos_head.py
│ │ │ │ │ ├── fovea_head.py
│ │ │ │ │ ├── free_anchor_retina_head.py
│ │ │ │ │ ├── fsaf_head.py
│ │ │ │ │ ├── ga_retina_head.py
│ │ │ │ │ ├── ga_rpn_head.py
│ │ │ │ │ ├── gfl_head.py
│ │ │ │ │ ├── guided_anchor_head.py
│ │ │ │ │ ├── lad_head.py
│ │ │ │ │ ├── ld_head.py
│ │ │ │ │ ├── mask2former_head.py
│ │ │ │ │ ├── maskformer_head.py
│ │ │ │ │ ├── nasfcos_head.py
│ │ │ │ │ ├── paa_head.py
│ │ │ │ │ ├── pisa_retinanet_head.py
│ │ │ │ │ ├── pisa_ssd_head.py
│ │ │ │ │ ├── reppoints_head.py
│ │ │ │ │ ├── retina_head.py
│ │ │ │ │ ├── retina_sepbn_head.py
│ │ │ │ │ ├── rpn_head.py
│ │ │ │ │ ├── sabl_retina_head.py
│ │ │ │ │ ├── solo_head.py
│ │ │ │ │ ├── solov2_head.py
│ │ │ │ │ ├── ssd_head.py
│ │ │ │ │ ├── tood_head.py
│ │ │ │ │ ├── vfnet_head.py
│ │ │ │ │ ├── yolact_head.py
│ │ │ │ │ ├── yolo_head.py
│ │ │ │ │ ├── yolof_head.py
│ │ │ │ │ └── yolox_head.py
│ │ │ │ ├── detectors/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── atss.py
│ │ │ │ │ ├── autoassign.py
│ │ │ │ │ ├── base.py
│ │ │ │ │ ├── cascade_rcnn.py
│ │ │ │ │ ├── centernet.py
│ │ │ │ │ ├── cornernet.py
│ │ │ │ │ ├── ddod.py
│ │ │ │ │ ├── deformable_detr.py
│ │ │ │ │ ├── detr.py
│ │ │ │ │ ├── fast_rcnn.py
│ │ │ │ │ ├── faster_rcnn.py
│ │ │ │ │ ├── fcos.py
│ │ │ │ │ ├── fovea.py
│ │ │ │ │ ├── fsaf.py
│ │ │ │ │ ├── gfl.py
│ │ │ │ │ ├── grid_rcnn.py
│ │ │ │ │ ├── htc.py
│ │ │ │ │ ├── kd_one_stage.py
│ │ │ │ │ ├── lad.py
│ │ │ │ │ ├── mask2former.py
│ │ │ │ │ ├── mask_rcnn.py
│ │ │ │ │ ├── mask_scoring_rcnn.py
│ │ │ │ │ ├── maskformer.py
│ │ │ │ │ ├── nasfcos.py
│ │ │ │ │ ├── paa.py
│ │ │ │ │ ├── panoptic_fpn.py
│ │ │ │ │ ├── panoptic_two_stage_segmentor.py
│ │ │ │ │ ├── point_rend.py
│ │ │ │ │ ├── queryinst.py
│ │ │ │ │ ├── reppoints_detector.py
│ │ │ │ │ ├── retinanet.py
│ │ │ │ │ ├── rpn.py
│ │ │ │ │ ├── scnet.py
│ │ │ │ │ ├── single_stage.py
│ │ │ │ │ ├── single_stage_instance_seg.py
│ │ │ │ │ ├── solo.py
│ │ │ │ │ ├── solov2.py
│ │ │ │ │ ├── sparse_rcnn.py
│ │ │ │ │ ├── tood.py
│ │ │ │ │ ├── trident_faster_rcnn.py
│ │ │ │ │ ├── two_stage.py
│ │ │ │ │ ├── vfnet.py
│ │ │ │ │ ├── yolact.py
│ │ │ │ │ ├── yolo.py
│ │ │ │ │ ├── yolof.py
│ │ │ │ │ └── yolox.py
│ │ │ │ ├── losses/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── accuracy.py
│ │ │ │ │ ├── ae_loss.py
│ │ │ │ │ ├── balanced_l1_loss.py
│ │ │ │ │ ├── cross_entropy_loss.py
│ │ │ │ │ ├── dice_loss.py
│ │ │ │ │ ├── focal_loss.py
│ │ │ │ │ ├── gaussian_focal_loss.py
│ │ │ │ │ ├── gfocal_loss.py
│ │ │ │ │ ├── ghm_loss.py
│ │ │ │ │ ├── iou_loss.py
│ │ │ │ │ ├── kd_loss.py
│ │ │ │ │ ├── mse_loss.py
│ │ │ │ │ ├── pisa_loss.py
│ │ │ │ │ ├── seesaw_loss.py
│ │ │ │ │ ├── smooth_l1_loss.py
│ │ │ │ │ ├── utils.py
│ │ │ │ │ └── varifocal_loss.py
│ │ │ │ ├── necks/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── bfp.py
│ │ │ │ │ ├── channel_mapper.py
│ │ │ │ │ ├── ct_resnet_neck.py
│ │ │ │ │ ├── dilated_encoder.py
│ │ │ │ │ ├── dyhead.py
│ │ │ │ │ ├── fpg.py
│ │ │ │ │ ├── fpn.py
│ │ │ │ │ ├── fpn_carafe.py
│ │ │ │ │ ├── hrfpn.py
│ │ │ │ │ ├── nas_fpn.py
│ │ │ │ │ ├── nasfcos_fpn.py
│ │ │ │ │ ├── pafpn.py
│ │ │ │ │ ├── rfp.py
│ │ │ │ │ ├── ssd_neck.py
│ │ │ │ │ ├── yolo_neck.py
│ │ │ │ │ └── yolox_pafpn.py
│ │ │ │ ├── plugins/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── dropblock.py
│ │ │ │ │ ├── msdeformattn_pixel_decoder.py
│ │ │ │ │ └── pixel_decoder.py
│ │ │ │ ├── roi_heads/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base_roi_head.py
│ │ │ │ │ ├── bbox_heads/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── bbox_head.py
│ │ │ │ │ │ ├── convfc_bbox_head.py
│ │ │ │ │ │ ├── dii_head.py
│ │ │ │ │ │ ├── double_bbox_head.py
│ │ │ │ │ │ ├── sabl_head.py
│ │ │ │ │ │ └── scnet_bbox_head.py
│ │ │ │ │ ├── cascade_roi_head.py
│ │ │ │ │ ├── double_roi_head.py
│ │ │ │ │ ├── dynamic_roi_head.py
│ │ │ │ │ ├── grid_roi_head.py
│ │ │ │ │ ├── htc_roi_head.py
│ │ │ │ │ ├── mask_heads/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── coarse_mask_head.py
│ │ │ │ │ │ ├── dynamic_mask_head.py
│ │ │ │ │ │ ├── fcn_mask_head.py
│ │ │ │ │ │ ├── feature_relay_head.py
│ │ │ │ │ │ ├── fused_semantic_head.py
│ │ │ │ │ │ ├── global_context_head.py
│ │ │ │ │ │ ├── grid_head.py
│ │ │ │ │ │ ├── htc_mask_head.py
│ │ │ │ │ │ ├── mask_point_head.py
│ │ │ │ │ │ ├── maskiou_head.py
│ │ │ │ │ │ ├── scnet_mask_head.py
│ │ │ │ │ │ └── scnet_semantic_head.py
│ │ │ │ │ ├── mask_scoring_roi_head.py
│ │ │ │ │ ├── pisa_roi_head.py
│ │ │ │ │ ├── point_rend_roi_head.py
│ │ │ │ │ ├── roi_extractors/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ ├── base_roi_extractor.py
│ │ │ │ │ │ ├── generic_roi_extractor.py
│ │ │ │ │ │ └── single_level_roi_extractor.py
│ │ │ │ │ ├── scnet_roi_head.py
│ │ │ │ │ ├── shared_heads/
│ │ │ │ │ │ ├── __init__.py
│ │ │ │ │ │ └── res_layer.py
│ │ │ │ │ ├── sparse_roi_head.py
│ │ │ │ │ ├── standard_roi_head.py
│ │ │ │ │ ├── test_mixins.py
│ │ │ │ │ └── trident_roi_head.py
│ │ │ │ ├── seg_heads/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base_semantic_head.py
│ │ │ │ │ ├── panoptic_fpn_head.py
│ │ │ │ │ └── panoptic_fusion_heads/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── base_panoptic_fusion_head.py
│ │ │ │ │ ├── heuristic_fusion_head.py
│ │ │ │ │ └── maskformer_fusion_head.py
│ │ │ │ └── utils/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── brick_wrappers.py
│ │ │ │ ├── builder.py
│ │ │ │ ├── ckpt_convert.py
│ │ │ │ ├── conv_upsample.py
│ │ │ │ ├── csp_layer.py
│ │ │ │ ├── gaussian_target.py
│ │ │ │ ├── inverted_residual.py
│ │ │ │ ├── make_divisible.py
│ │ │ │ ├── misc.py
│ │ │ │ ├── normed_predictor.py
│ │ │ │ ├── panoptic_gt_processing.py
│ │ │ │ ├── point_sample.py
│ │ │ │ ├── positional_encoding.py
│ │ │ │ ├── res_layer.py
│ │ │ │ ├── se_layer.py
│ │ │ │ └── transformer.py
│ │ │ ├── utils/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── collect_env.py
│ │ │ │ ├── compat_config.py
│ │ │ │ ├── contextmanagers.py
│ │ │ │ ├── logger.py
│ │ │ │ ├── memory.py
│ │ │ │ ├── misc.py
│ │ │ │ ├── profiling.py
│ │ │ │ ├── replace_cfg_vals.py
│ │ │ │ ├── setup_env.py
│ │ │ │ ├── split_batch.py
│ │ │ │ ├── util_distribution.py
│ │ │ │ ├── util_mixins.py
│ │ │ │ └── util_random.py
│ │ │ └── version.py
│ │ ├── model-index.yml
│ │ ├── pytest.ini
│ │ ├── requirements/
│ │ │ ├── albu.txt
│ │ │ ├── build.txt
│ │ │ ├── docs.txt
│ │ │ ├── mminstall.txt
│ │ │ ├── optional.txt
│ │ │ ├── readthedocs.txt
│ │ │ ├── runtime.txt
│ │ │ └── tests.txt
│ │ ├── requirements.txt
│ │ ├── setup.cfg
│ │ ├── setup.py
│ │ ├── tests/
│ │ │ ├── test_data/
│ │ │ │ ├── test_datasets/
│ │ │ │ │ ├── test_coco_dataset.py
│ │ │ │ │ ├── test_common.py
│ │ │ │ │ ├── test_custom_dataset.py
│ │ │ │ │ ├── test_dataset_wrapper.py
│ │ │ │ │ ├── test_openimages_dataset.py
│ │ │ │ │ ├── test_panoptic_dataset.py
│ │ │ │ │ └── test_xml_dataset.py
│ │ │ │ ├── test_pipelines/
│ │ │ │ │ ├── test_formatting.py
│ │ │ │ │ ├── test_loading.py
│ │ │ │ │ ├── test_sampler.py
│ │ │ │ │ └── test_transform/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── test_img_augment.py
│ │ │ │ │ ├── test_models_aug_test.py
│ │ │ │ │ ├── test_rotate.py
│ │ │ │ │ ├── test_shear.py
│ │ │ │ │ ├── test_transform.py
│ │ │ │ │ ├── test_translate.py
│ │ │ │ │ └── utils.py
│ │ │ │ └── test_utils.py
│ │ │ ├── test_downstream/
│ │ │ │ └── test_mmtrack.py
│ │ │ ├── test_metrics/
│ │ │ │ ├── test_box_overlap.py
│ │ │ │ ├── test_losses.py
│ │ │ │ ├── test_mean_ap.py
│ │ │ │ └── test_recall.py
│ │ │ ├── test_models/
│ │ │ │ ├── test_backbones/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── test_csp_darknet.py
│ │ │ │ │ ├── test_detectors_resnet.py
│ │ │ │ │ ├── test_efficientnet.py
│ │ │ │ │ ├── test_hourglass.py
│ │ │ │ │ ├── test_hrnet.py
│ │ │ │ │ ├── test_mobilenet_v2.py
│ │ │ │ │ ├── test_pvt.py
│ │ │ │ │ ├── test_regnet.py
│ │ │ │ │ ├── test_renext.py
│ │ │ │ │ ├── test_res2net.py
│ │ │ │ │ ├── test_resnest.py
│ │ │ │ │ ├── test_resnet.py
│ │ │ │ │ ├── test_swin.py
│ │ │ │ │ ├── test_trident_resnet.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── test_dense_heads/
│ │ │ │ │ ├── test_anchor_head.py
│ │ │ │ │ ├── test_atss_head.py
│ │ │ │ │ ├── test_autoassign_head.py
│ │ │ │ │ ├── test_centernet_head.py
│ │ │ │ │ ├── test_corner_head.py
│ │ │ │ │ ├── test_ddod_head.py
│ │ │ │ │ ├── test_dense_heads_attr.py
│ │ │ │ │ ├── test_detr_head.py
│ │ │ │ │ ├── test_fcos_head.py
│ │ │ │ │ ├── test_fsaf_head.py
│ │ │ │ │ ├── test_ga_anchor_head.py
│ │ │ │ │ ├── test_gfl_head.py
│ │ │ │ │ ├── test_lad_head.py
│ │ │ │ │ ├── test_ld_head.py
│ │ │ │ │ ├── test_mask2former_head.py
│ │ │ │ │ ├── test_maskformer_head.py
│ │ │ │ │ ├── test_paa_head.py
│ │ │ │ │ ├── test_pisa_head.py
│ │ │ │ │ ├── test_sabl_retina_head.py
│ │ │ │ │ ├── test_solo_head.py
│ │ │ │ │ ├── test_tood_head.py
│ │ │ │ │ ├── test_vfnet_head.py
│ │ │ │ │ ├── test_yolact_head.py
│ │ │ │ │ ├── test_yolof_head.py
│ │ │ │ │ └── test_yolox_head.py
│ │ │ │ ├── test_forward.py
│ │ │ │ ├── test_loss.py
│ │ │ │ ├── test_loss_compatibility.py
│ │ │ │ ├── test_necks.py
│ │ │ │ ├── test_plugins.py
│ │ │ │ ├── test_roi_heads/
│ │ │ │ │ ├── __init__.py
│ │ │ │ │ ├── test_bbox_head.py
│ │ │ │ │ ├── test_mask_head.py
│ │ │ │ │ ├── test_roi_extractor.py
│ │ │ │ │ ├── test_sabl_bbox_head.py
│ │ │ │ │ └── utils.py
│ │ │ │ ├── test_seg_heads/
│ │ │ │ │ └── test_maskformer_fusion_head.py
│ │ │ │ └── test_utils/
│ │ │ │ ├── test_brick_wrappers.py
│ │ │ │ ├── test_conv_upsample.py
│ │ │ │ ├── test_inverted_residual.py
│ │ │ │ ├── test_model_misc.py
│ │ │ │ ├── test_position_encoding.py
│ │ │ │ ├── test_se_layer.py
│ │ │ │ └── test_transformer.py
│ │ │ ├── test_onnx/
│ │ │ │ ├── __init__.py
│ │ │ │ ├── test_head.py
│ │ │ │ ├── test_neck.py
│ │ │ │ └── utils.py
│ │ │ ├── test_runtime/
│ │ │ │ ├── async_benchmark.py
│ │ │ │ ├── test_apis.py
│ │ │ │ ├── test_async.py
│ │ │ │ ├── test_config.py
│ │ │ │ ├── test_eval_hook.py
│ │ │ │ └── test_fp16.py
│ │ │ └── test_utils/
│ │ │ ├── test_anchor.py
│ │ │ ├── test_assigner.py
│ │ │ ├── test_coder.py
│ │ │ ├── test_compat_config.py
│ │ │ ├── test_general_data.py
│ │ │ ├── test_hook.py
│ │ │ ├── test_layer_decay_optimizer_constructor.py
│ │ │ ├── test_logger.py
│ │ │ ├── test_masks.py
│ │ │ ├── test_memory.py
│ │ │ ├── test_misc.py
│ │ │ ├── test_nms.py
│ │ │ ├── test_replace_cfg_vals.py
│ │ │ ├── test_setup_env.py
│ │ │ ├── test_split_batch.py
│ │ │ ├── test_version.py
│ │ │ └── test_visualization.py
│ │ └── tools/
│ │ ├── analysis_tools/
│ │ │ ├── analyze_logs.py
│ │ │ ├── analyze_results.py
│ │ │ ├── benchmark.py
│ │ │ ├── coco_error_analysis.py
│ │ │ ├── confusion_matrix.py
│ │ │ ├── eval_metric.py
│ │ │ ├── get_flops.py
│ │ │ ├── optimize_anchors.py
│ │ │ ├── robustness_eval.py
│ │ │ └── test_robustness.py
│ │ ├── dataset_converters/
│ │ │ ├── cityscapes.py
│ │ │ ├── images2coco.py
│ │ │ └── pascal_voc.py
│ │ ├── deployment/
│ │ │ ├── mmdet2torchserve.py
│ │ │ ├── mmdet_handler.py
│ │ │ ├── onnx2tensorrt.py
│ │ │ ├── pytorch2onnx.py
│ │ │ ├── test.py
│ │ │ └── test_torchserver.py
│ │ ├── dist_test.sh
│ │ ├── dist_train.sh
│ │ ├── misc/
│ │ │ ├── browse_dataset.py
│ │ │ ├── download_dataset.py
│ │ │ ├── gen_coco_panoptic_test_info.py
│ │ │ ├── get_image_metas.py
│ │ │ ├── print_config.py
│ │ │ └── split_coco.py
│ │ ├── model_converters/
│ │ │ ├── detectron2pytorch.py
│ │ │ ├── publish_model.py
│ │ │ ├── regnet2mmdet.py
│ │ │ ├── selfsup2mmdet.py
│ │ │ ├── upgrade_model_version.py
│ │ │ └── upgrade_ssd_version.py
│ │ ├── slurm_test.sh
│ │ ├── slurm_train.sh
│ │ ├── test.py
│ │ └── train.py
│ ├── models_menu/
│ │ ├── mmscraper.py
│ │ ├── models_json.json
│ │ ├── samScraper.py
│ │ └── sam_models.json
│ ├── setup.py
│ ├── tempCodeRunnerFile.py
│ └── trackers/
│ ├── __init__.py
│ ├── botsort/
│ │ ├── basetrack.py
│ │ ├── bot_sort.py
│ │ ├── configs/
│ │ │ └── botsort.yaml
│ │ ├── gmc.py
│ │ ├── kalman_filter.py
│ │ ├── matching.py
│ │ └── reid_multibackend.py
│ ├── bytetrack/
│ │ ├── basetrack.py
│ │ ├── byte_tracker.py
│ │ ├── configs/
│ │ │ └── bytetrack.yaml
│ │ ├── kalman_filter.py
│ │ └── matching.py
│ ├── deepocsort/
│ │ ├── __init__.py
│ │ ├── args.py
│ │ ├── association.py
│ │ ├── cmc.py
│ │ ├── configs/
│ │ │ └── deepocsort.yaml
│ │ ├── embedding.py
│ │ ├── kalmanfilter.py
│ │ ├── ocsort.py
│ │ └── reid_multibackend.py
│ ├── multi_tracker_zoo.py
│ ├── ocsort/
│ │ ├── association.py
│ │ ├── configs/
│ │ │ └── ocsort.yaml
│ │ ├── kalmanfilter.py
│ │ └── ocsort.py
│ ├── reid_export.py
│ └── strongsort/
│ ├── .gitignore
│ ├── __init__.py
│ ├── configs/
│ │ └── strongsort.yaml
│ ├── deep/
│ │ ├── checkpoint/
│ │ │ └── .gitkeep
│ │ ├── models/
│ │ │ ├── __init__.py
│ │ │ ├── densenet.py
│ │ │ ├── hacnn.py
│ │ │ ├── inceptionresnetv2.py
│ │ │ ├── inceptionv4.py
│ │ │ ├── mlfn.py
│ │ │ ├── mobilenetv2.py
│ │ │ ├── mudeep.py
│ │ │ ├── nasnet.py
│ │ │ ├── osnet.py
│ │ │ ├── osnet_ain.py
│ │ │ ├── pcb.py
│ │ │ ├── resnet.py
│ │ │ ├── resnet_ibn_a.py
│ │ │ ├── resnet_ibn_b.py
│ │ │ ├── resnetmid.py
│ │ │ ├── senet.py
│ │ │ ├── shufflenet.py
│ │ │ ├── shufflenetv2.py
│ │ │ ├── squeezenet.py
│ │ │ └── xception.py
│ │ └── reid_model_factory.py
│ ├── reid_multibackend.py
│ ├── sort/
│ │ ├── __init__.py
│ │ ├── detection.py
│ │ ├── iou_matching.py
│ │ ├── kalman_filter.py
│ │ ├── linear_assignment.py
│ │ ├── nn_matching.py
│ │ ├── preprocessing.py
│ │ ├── track.py
│ │ └── tracker.py
│ ├── strong_sort.py
│ └── utils/
│ ├── __init__.py
│ ├── asserts.py
│ ├── draw.py
│ ├── evaluation.py
│ ├── io.py
│ ├── json_logger.py
│ ├── log.py
│ ├── parser.py
│ └── tools.py
├── LICENSE
├── MANIFEST.in
├── README.md
├── additional_scripts/
│ └── coco_eval.py
├── docs/
│ ├── Installation/
│ │ ├── executable.md
│ │ ├── full installation.md
│ │ ├── index.yml
│ │ └── problems.md
│ ├── index.md
│ ├── main_features/
│ │ ├── Export.md
│ │ ├── SAM.md
│ │ ├── index.yml
│ │ ├── inputs.md
│ │ ├── segmentation.md
│ │ └── tracking/
│ │ ├── index.yml
│ │ ├── interpolation.md
│ │ └── tracking.md
│ ├── model_selection/
│ │ ├── index.yml
│ │ ├── merge.md
│ │ └── model_explorer.md
│ ├── retype.yml
│ └── user_interface.md
├── releasenotes.md
├── requirements.txt
├── setup.py
└── yolo training commands.txt
================================================
FILE CONTENTS
================================================
================================================
FILE: .github/workflows/retype-action.yml
================================================
# Build the Retype documentation site and publish it to GitHub Pages.
# Runs on every push to master, or manually via workflow_dispatch.
name: Publish Retype powered website to GitHub Pages
on:
  workflow_dispatch:
  push:
    branches:
      - master
jobs:
  publish:
    name: Publish to retype branch
    runs-on: ubuntu-latest
    permissions:
      contents: write  # required to push the generated pages branch
    steps:
      # NOTE(review): actions/checkout@v2 is deprecated upstream — consider
      # upgrading to a current major version.
      - uses: actions/checkout@v2
      - uses: retypeapp/action-build@latest
      - uses: retypeapp/action-github-pages@latest
        with:
          update-branch: true
================================================
FILE: .gitignore
================================================
*.pt
*.pth
# anything in the folders: datasets, runs
runs/
*.lnk
*.ps1
*.docx
================================================
FILE: DLTA_AI_app/.flake8
================================================
[flake8]
exclude = .anaconda3/*
ignore = E203, E741, W503, W504
================================================
FILE: DLTA_AI_app/.gitignore
================================================
/.cache/
/.pytest_cache/
/build/
/dist/
/*.egg-info/
*.py[cdo]
.DS_Store
.idea/
# mp4
*.mp4
# anything in the folder test_videos
test_videos/*
saved_models.json
================================================
FILE: DLTA_AI_app/.gitmodules
================================================
[submodule "github2pypi"]
path = github2pypi
url = https://github.com/wkentaro/github2pypi.git
================================================
FILE: DLTA_AI_app/__main__.py
================================================
import os
import sys
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
os.chdir(os.path.dirname(os.path.realpath(__file__)))
from PyQt6 import QtGui, QtWidgets, QtCore
from labelme import __appname__
from labelme import __version__
from labelme.utils import newIcon
import qdarktheme
def main():
    """Entry point for the DLTA-AI GUI.

    Creates the Qt application, shows a splash screen while the (heavy)
    main-window module is imported, applies the configured theme, then
    starts the Qt event loop.  Does not return; exits via ``sys.exit``.
    """
    app = QtWidgets.QApplication(sys.argv)
    QtWidgets.QApplication.setHighDpiScaleFactorRoundingPolicy(
        QtCore.Qt.HighDpiScaleFactorRoundingPolicy.RoundPreferFloor)
    app.setApplicationName(__appname__)
    app.setWindowIcon(newIcon("icon"))

    # create and show splash screen
    splash_pix = QtGui.QPixmap('labelme/icons/splash_screen.png')
    splash = QtWidgets.QSplashScreen(splash_pix)

    # Center the splash screen on the primary monitor.  Best-effort only:
    # screeninfo may be missing or fail (e.g. headless session), in which
    # case the default placement is kept.
    try:
        from screeninfo import get_monitors
        monitor = get_monitors()[0]  # query once (was called twice before)
        splash_width = splash.width()
        splash_height = splash.height()
        splash.move(int((monitor.width - splash_width) / 2),
                    int((monitor.height - splash_height) / 2))
    except Exception:
        pass  # centering is purely cosmetic

    splash.show()

    # Extra QSS layered on top of the qdarktheme stylesheet.
    qss = """
    QMenuBar::item {
        padding: 10px;
        margin: 0 5px
    }
    QMenu{
        border-radius: 5px;
    }
    QMenu::item{
        padding: 8px;
        margin: 5px;
        border-radius: 5px;
    }
    QToolTip {
        color: #111111;
        background-color: #EEEEEE;
    }
    QCheckBox{
        margin: 0 7px;
    }
    QComboBox{
        font-size: 10pt;
        font-weight: bold;
    }
    """

    # Apply the user-configured theme; a failure here is non-fatal, the app
    # simply runs with default styling.
    try:
        import yaml
        with open("labelme/config/default_config.yaml", "r") as f:
            config = yaml.load(f, Loader=yaml.FullLoader)
        qdarktheme.setup_theme(theme=config["theme"],
                               default_theme="dark",
                               additional_qss=qss)
    except Exception as e:
        print(f"ERROR {e}")

    # Import here (not at module top) so the splash screen is visible while
    # the heavy labelme.app module loads.
    from labelme.app import MainWindow
    win = MainWindow()
    splash.finish(win)
    win.showMaximized()
    win.raise_()
    sys.exit(app.exec())


# this main block is required to generate executable by pyinstaller
if __name__ == "__main__":
    main()
================================================
FILE: DLTA_AI_app/__main__.spec
================================================
# -*- mode: python -*-
# vim: ft=python
from glob import glob
# PyInstaller build specification for the DLTA-AI executable.
block_cipher = None

# Data files bundled next to the executable: (source glob, destination dir).
datas_list = [
    ('models_menu/*.json', 'models_menu'),
    ('models_menu/*.py', 'models_menu'),
    ('ultralytics/', 'ultralytics'),
    ('labelme/', 'labelme'),
    ('mmdetection/', 'mmdetection'),
    ('trackers/', 'trackers'),
]

# Modules PyInstaller cannot discover through static import analysis.
hiddenimports_list = [
    'mmcv',
    'mmcv._ext',
    'torchvision',
]

a = Analysis(
    ['__main__.py'],
    pathex=[],
    binaries=[],
    datas=datas_list,
    hiddenimports=hiddenimports_list,
    hookspath=[],
    hooksconfig={},
    runtime_hooks=[],
    excludes=[],
    win_no_prefer_redirects=False,
    win_private_assemblies=False,
    cipher=block_cipher,
    noarchive=False,
)

pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)

exe = EXE(
    pyz,
    a.scripts,
    [],
    exclude_binaries=True,
    name='DLTA-AI',
    debug=False,
    bootloader_ignore_signals=False,
    strip=False,
    upx=True,
    console=True,
    disable_windowed_traceback=False,
    argv_emulation=False,
    target_arch=None,
    codesign_identity=None,
    entitlements_file=None,
    # Raw string: the original non-raw literal relied on backslash sequences
    # like "\G" passing through unchanged, which is fragile (and deprecated
    # as of newer Python versions).
    # NOTE(review): machine-specific absolute path — should be made relative
    # to the repository for portable builds.
    icon=r"C:\Graduation Project\Auto Annotation Tool\DLTA-AI\DLTA-AI-app\labelme\icons\icon.png",
)

coll = COLLECT(
    exe,
    a.binaries,
    a.zipfiles,
    a.datas,
    strip=False,
    upx=True,
    upx_exclude=[],
    name='DLTA-AI',
)
================================================
FILE: DLTA_AI_app/inferencing.py
================================================
import copy
from supervision.detection.core import Detections
from time import time
import torch
from mmdet.apis import inference_detector, init_detector, async_inference_detector
import cv2
import numpy as np
import matplotlib.pyplot as plt
import warnings
# from ultralytics.yolo.utils.ops import Profile, non_max_suppression, scale_boxes, process_mask, process_mask_native
from labelme.utils.helpers import mathOps
warnings.filterwarnings("ignore")
class models_inference():
    """Run segmentation models (ultralytics YOLO or mmdetection) and
    post-process their raw outputs into polygon annotation results.

    Polygon result dicts have the form:
        {"results": [{"class": str, "confidence": str,
                      "bbox": ndarray (YOLO path only), "seg": polygon}, ...]}
    """

    def __init__(self):
        # model class name -> [bboxes_per_class, masks_per_class]; consumed
        # by merge_masks() when several models annotate the same image
        self.annotating_models = {}

    @staticmethod
    def full_points(bbox):
        """Return the four corner points of an xyxy bbox as a (4, 2) array.

        NOTE(review): this was declared as an instance method without
        ``self``, which made it uncallable through an instance; it is now a
        staticmethod with identical output. (It is only referenced from
        commented-out code elsewhere.)
        """
        return np.array([[bbox[0], bbox[1]], [bbox[0], bbox[3]],
                         [bbox[2], bbox[3]], [bbox[2], bbox[1]]])

    @torch.no_grad()
    def decode_file(self, img, model, classdict, threshold=0.3, img_array_flag=False):
        """Run ``model`` on ``img`` and decode its output.

        Args:
            img: image file path, or an image array (BGR, as from cv2).
            model: an ultralytics ``YOLO`` model or an mmdetection detector.
            classdict: maps class id -> class name; ids missing from the
                dict are dropped from the results.
            threshold: minimum (rounded) confidence for an instance to be kept.
            img_array_flag: mmdetection path only — when True, ``img`` is
                already an image array rather than a path.

        Returns:
            YOLO path: ``{"results": [...]}`` polygon results.
            mmdetection path: the threshold-filtered raw pair
            ``(bboxes_per_class, masks_per_class)`` — turned into polygons
            later by :meth:`polegonise`.
        """
        if model.__class__.__name__ == "YOLO":
            if isinstance(img, str):
                img = cv2.imread(img)
            # YOLO inference runs on a fixed 640x640 input; boxes are mapped
            # back to the original resolution below.
            img_resized = cv2.resize(img, (640, 640))
            # default yolo arguments from the yolov8 tracking repo:
            # imgsz=(640, 640), conf=0.25 (confidence), iou=0.45 (NMS)
            results = model(img_resized, conf=0.25, iou=0.45, verbose=False)
            results = results[0]
            # no masks at all -> nothing to report
            if results.masks is None:
                return {"results": {}}
            masks = results.masks.cpu().numpy().masks
            masks = masks > 0.0
            org_size = img.shape[:2]
            out_size = masks.shape[1:]
            # scale boxes from the network output size back to the original
            # image size, matching what is done for the masks
            boxes = results.boxes.xyxy.cpu().numpy()
            boxes = boxes * np.array([org_size[1] / out_size[1], org_size[0] / out_size[0],
                                      org_size[1] / out_size[1], org_size[0] / out_size[0]])
            detections = Detections(
                xyxy=boxes,
                confidence=results.boxes.conf.cpu().numpy(),
                class_id=results.boxes.cls.cpu().numpy().astype(int),
            )
            if len(masks) == 0:
                return {"results": {}}
            resize_factors = [org_size[0] / out_size[0], org_size[1] / out_size[1]]
            # one polygon per mask, index-aligned with `detections`
            polygons = [mathOps.mask_to_polygons(mask, resize_factors=resize_factors)
                        for mask in masks]
            res_list = []
            # each detection is a tuple of (box, confidence, class_id, tracker_id)
            # BUGFIX: iterate with enumerate so every detection keeps its own
            # polygon; previously the polygon index only advanced for
            # detections that passed the threshold, so after any skipped
            # detection the remaining ones were paired with the wrong polygon.
            for ind, detection in enumerate(detections):
                if round(detection[1], 2) < float(threshold):
                    continue
                result = {}
                result["class"] = classdict.get(int(detection[2]))
                result["confidence"] = str(round(detection[1], 2))
                result["bbox"] = detection[0].astype(int)
                result["seg"] = polygons[ind]
                # drop classes the user did not select
                if result["class"] is None:
                    continue
                # a polygon needs at least 3 points
                if len(result["seg"]) < 3:
                    continue
                res_list.append(result)
            result_dict = {}
            result_dict["results"] = res_list
            return result_dict

        # --- mmdetection path ---
        if img_array_flag:
            results = inference_detector(model, img)
        else:
            results = inference_detector(model, plt.imread(img))
        torch.cuda.empty_cache()
        # keep only the instances whose score (column 4 of the bbox row) is
        # at or above the confidence threshold, per selected class
        results0 = []
        results1 = []
        for i in classdict.keys():
            mask = results[0][i][:, 4] >= float(threshold)
            results0.append(results[0][i][mask])
            results1.append(list(np.array(results[1][i])[mask]))
        return results0, results1

    def polegonise(self, results0, results1, classdict, threshold=0.3, show_bbox_flag=False):
        """Convert mmdetection per-class (bboxes, masks) into polygon results.

        Args:
            results0: per-class arrays of [x1, y1, x2, y2, score] rows.
            results1: per-class lists of binary instance masks.
            classdict: maps class id -> class name.
            threshold: minimum confidence for an instance to be kept.
            show_bbox_flag: kept for interface compatibility; currently a no-op.

        Returns:
            ``{"results": [...]}`` polygon results.
        """
        result_dict = {}
        res_list = []
        # position in results0/results1 -> class id in classdict
        self.classes_numbering = [keyno for keyno in classdict.keys()]
        for classno in range(len(results0)):
            for instance in range(len(results0[classno])):
                # last element of the bbox row is the confidence score
                if float(results0[classno][instance][-1]) < float(threshold):
                    continue
                result = {}
                result["class"] = classdict.get(
                    self.classes_numbering[classno])
                result["confidence"] = str(
                    round(results0[classno][instance][-1], 2))
                # NOTE(review): class index 0 uses 10 and every other class
                # 25 for mask_to_polygons' second argument, matching the
                # original code; the parameter's meaning is not visible here.
                if classno == 0:
                    result["seg"] = mathOps.mask_to_polygons(
                        results1[classno][instance].astype(np.uint8), 10)
                else:
                    result["seg"] = mathOps.mask_to_polygons(
                        results1[classno][instance].astype(np.uint8), 25)
                # drop classes the user did not select
                if result["class"] is None:
                    continue
                # a polygon needs at least 3 points
                if len(result["seg"]) < 3:
                    continue
                res_list.append(result)
        result_dict["results"] = res_list
        return result_dict

    def merge_masks(self):
        """Merge overlapping instances produced by several models.

        For every ordered pair of models, instances of the same class whose
        masks overlap with IoU > 0.5 are merged (union of the masks, the
        enclosing bbox, max confidence); unmatched instances are kept as-is.
        Consumes ``self.annotating_models`` (cleared afterwards).

        Returns:
            The merged ``(bboxes_per_class, masks_per_class)`` pair.
        """
        tic = time()
        result0 = []  # merged bboxes, one list per class
        result1 = []  # merged masks, one list per class
        # debugging: report how many instances each model produced
        counts = count_instances(self.annotating_models)
        for model in counts.keys():
            print("model {} has {} instances".format(model, counts[model]))
        # all models are assumed to share the same number of classes; take
        # the class count from the first one
        classnos = len(self.annotating_models[list(
            self.annotating_models.keys())[0]][1])
        merged_counts = 0
        for i in range(classnos):
            result1.append([])
            result0.append([])
        # deep copy so merged instances can be marked consumed (set to None)
        # without disturbing the iteration over the originals
        annotating_models_copy = copy.deepcopy(self.annotating_models)
        # merge masks of the same class across every ordered pair of models
        for idx1, model in enumerate(self.annotating_models.keys()):
            for classno in range(len(self.annotating_models[model][1])):
                # skip classes with no instances in this model
                if len(self.annotating_models[model][1][classno]) > 0:
                    for instance in range(len(self.annotating_models[model][1][classno])):
                        for idx2, model2 in enumerate(self.annotating_models.keys()):
                            # idx2 > idx1 ensures each model pair is visited once
                            if model != model2 and idx2 > idx1:
                                # check the class exists in the other model
                                if classno in range(len(self.annotating_models[model2][1])):
                                    # and that it has at least one instance
                                    if len(self.annotating_models[model2][1][classno]) > 0:
                                        for instance2 in range(len(self.annotating_models[model2][1][classno])):
                                            dirty = False
                                            # IoU of the two binary masks
                                            intersection = np.logical_and(
                                                self.annotating_models[model][1][classno][instance],
                                                self.annotating_models[model2][1][classno][instance2])
                                            intersection = np.sum(intersection)
                                            union = np.logical_or(
                                                self.annotating_models[model][1][classno][instance],
                                                self.annotating_models[model2][1][classno][instance2])
                                            union = np.sum(union)
                                            iou = intersection / union
                                            if iou > 0.5:
                                                # either side already merged away?
                                                if (annotating_models_copy[model][1][classno][instance] is None) or (annotating_models_copy[model2][1][classno][instance2] is None):
                                                    dirty = True
                                                if not dirty:
                                                    # merge their bboxes: enclosing box,
                                                    # max of the confidence column
                                                    bbox1 = self.annotating_models[model][0][classno][instance]
                                                    bbox2 = self.annotating_models[model2][0][classno][instance2]
                                                    bbox = [min(bbox1[0], bbox2[0]), min(bbox1[1], bbox2[1]),
                                                            max(bbox1[2], bbox2[2]), max(bbox1[3], bbox2[3]),
                                                            max(bbox1[4], bbox2[4])]
                                                    result0[classno].append(bbox)
                                                    # store the union of the two masks
                                                    result1[classno].append(np.logical_or(
                                                        self.annotating_models[model][1][classno][instance],
                                                        self.annotating_models[model2][1][classno][instance2]))
                                                    merged_counts += 1
                                                    # mark both sides consumed
                                                    annotating_models_copy[model][1][classno][instance] = None
                                                    annotating_models_copy[model2][1][classno][instance2] = None
                                                    annotating_models_copy[model][0][classno][instance] = None
                                                    annotating_models_copy[model2][0][classno][instance2] = None
                                                    # continue with the next instance
                                                    # of the first model
                                                    break
        counts_here = {}
        # add the remaining (unmerged) masks to the result
        for model in annotating_models_copy.keys():
            counts_here[model] = 0
            for classno in range(len(annotating_models_copy[model][1])):
                for instance in range(len(annotating_models_copy[model][1][classno])):
                    if annotating_models_copy[model][1][classno][instance] is not None:
                        counts_here[model] += 1
                        result1[classno].append(
                            annotating_models_copy[model][1][classno][instance])
                        result0[classno].append(
                            annotating_models_copy[model][0][classno][instance])
        # clear the pending-models dict now that everything is merged
        self.annotating_models = {}
        for model in counts_here.keys():
            print("model {} has {} instances".format(
                model, counts_here[model]))
        print("merged {} instances".format(merged_counts))
        tac = time()
        print("merging took {} ms".format((tac - tic) * 1000))
        return result0, result1


# result will have ---> bbox , confidence , class_id , tracker_id , segment
# result of the detection phase only should be (bbox , confidence , class_id , segment)
def count_instances(annotating_models):
    """Return {model name: total instance count across all classes}.

    Each value in *annotating_models* is a [bboxes, masks_per_class] pair;
    instances are counted from the per-class mask lists (index 1).
    """
    return {
        name: sum(len(per_class) for per_class in data[1])
        for name, data in annotating_models.items()
    }
================================================
FILE: DLTA_AI_app/labelme/__init__.py
================================================
# flake8: noqa
import logging
import sys
from qtpy import QT_VERSION
__appname__ = "DLTA-AI"

# Semantic Versioning 2.0.0: https://semver.org/
# 1. MAJOR version when you make incompatible API changes;
# 2. MINOR version when you add functionality in a backwards-compatible manner;
# 3. PATCH version when you make backwards-compatible bug fixes.
__version__ = "1.1"

# Qt major-version flags used throughout the package.
QT4 = QT_VERSION[0] == "4"
QT5 = QT_VERSION[0] == "5"
del QT_VERSION

# BUGFIX: compare the numeric major version instead of the first character
# of the version *string* (sys.version[0]), which breaks for any
# multi-digit major release.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
del sys
from labelme.label_file import LabelFile
from labelme import testing
from labelme import utils
================================================
FILE: DLTA_AI_app/labelme/app.py
================================================
# -*- coding: utf-8 -*-
import functools
import json
import math
import re
import copy
import imgviz
import torch
import cv2
import warnings
import os
import os.path as osp
import numpy as np
from pathlib import Path
from PyQt6 import QtCore
from PyQt6.QtCore import Qt, QThread
from PyQt6 import QtGui
from PyQt6 import QtWidgets
from PyQt6.QtCore import QObject, pyqtSignal, pyqtSlot
from . import __appname__
from . import PY2
from . import QT5
from . import utils
from .utils.sam import Sam_Predictor
from .utils.helpers import visualizations, mathOps
from .utils.custom_exports import custom_exports_list
from .config import get_config
from .label_file import LabelFile
from .label_file import LabelFileError
from .logger import logger
from .shape import Shape
from .widgets import BrightnessContrastDialog, Canvas, LabelDialog, LabelListWidget, LabelListWidgetItem, ToolBar, UniqueLabelQListWidget, ZoomWidget
from .widgets import MsgBox, interpolation_UI, exportData_UI, deleteSelectedShape_UI, scaleObject_UI, getIDfromUser_UI, notification
from .widgets import runtime_data_UI, preferences_UI, shortcut_selector_UI, links, feedback_UI, check_updates_UI
from .widgets.editLabel_videoMode import editLabel_idChanged_UI, editLabel_handle_data
from .widgets.segmentation_options_UI import SegmentationOptionsUI
from .widgets.merge_feature_UI import MergeFeatureUI
from .intelligence import Intelligence
from .intelligence import coco_classes, color_palette
from supervision.detection.core import Detections
from trackers.multi_tracker_zoo import create_tracker
from ultralytics.yolo.utils.torch_utils import select_device
warnings.filterwarnings("ignore")

# Repository root (two levels above this file), expressed relative to the
# current working directory.
FILE = Path(__file__).resolve()
ROOT = Path(os.path.relpath(FILE.parents[1], Path.cwd()))

# Run on the first GPU when CUDA is available, otherwise on the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Re-identification weights used by the tracking backends.
reid_weights = Path('osnet_x1_0_msmt17.pt')

LABEL_COLORMAP = imgviz.label_colormap(value=200)
class MainWindow(QtWidgets.QMainWindow):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = 0, 1, 2
tracking_progress_bar_signal = pyqtSignal(int)
def __init__(
self,
config=None,
filename=None,
output=None,
output_file=None,
output_dir=None,
):
self.buttons_text_style_sheet = "QPushButton {font-size: 10pt; margin: 2px 5px; padding: 2px 7px;font-weight: bold; background-color: #0d69f5; color: #FFFFFF;} QPushButton:hover {background-color: #4990ED;} QPushButton:disabled {background-color: #7A7A7A;}"
if output is not None:
logger.warning(
"argument output is deprecated, use output_file instead"
)
if output_file is None:
output_file = output
# see labelme/config/default_config.yaml for valid configuration
if config is None:
config = get_config()
self._config = config
self.decodingCanceled = False
# set default shape colors
Shape.line_color = QtGui.QColor(*self._config["shape"]["line_color"])
Shape.fill_color = QtGui.QColor(*self._config["shape"]["fill_color"])
Shape.select_line_color = QtGui.QColor(
*self._config["shape"]["select_line_color"]
)
Shape.select_fill_color = QtGui.QColor(
*self._config["shape"]["select_fill_color"]
)
Shape.vertex_fill_color = QtGui.QColor(
*self._config["shape"]["vertex_fill_color"]
)
Shape.hvertex_fill_color = QtGui.QColor(
*self._config["shape"]["hvertex_fill_color"]
)
# update models json
mathOps.update_saved_models_json(os.getcwd())
# add the segmentation UI controls interfance
self.segmentation_options_UI = SegmentationOptionsUI(self)
# add the merge ui interface
self.merge_feature_UI = MergeFeatureUI(self)
super(MainWindow, self).__init__()
try:
self.intelligenceHelper = Intelligence(self)
except:
print("it seems you have a problem with initializing model\ncheck you have at least one model")
self.helper_first_time_flag = True
else:
self.helper_first_time_flag = False
self.setWindowTitle(__appname__)
# Whether we need to save or not.
self.dirty = False
self._noSelectionSlot = False
# Main widgets and related state.
self.labelDialog = LabelDialog(
parent=self,
labels=self._config["labels"],
sort_labels=self._config["sort_labels"],
show_text_field=self._config["show_label_text_field"],
completion=self._config["label_completion"],
fit_to_content=self._config["fit_to_content"],
flags=self._config["label_flags"],
)
self.labelList = LabelListWidget()
self.lastOpenDir = None
self.flag_dock = self.flag_widget = None
self.flag_dock = QtWidgets.QDockWidget(self.tr("Flags"), self)
self.flag_dock.setObjectName("Flags")
self.flag_widget = QtWidgets.QListWidget()
if config["flags"]:
self.loadFlags({k: False for k in config["flags"]})
# self.flag_dock.setWidget(self.flag_widget)
self.flag_widget.itemChanged.connect(self.setDirty)
self.labelList.itemSelectionChanged.connect(self.labelSelectionChanged)
self.labelList.itemDoubleClicked.connect(self.editLabel)
self.labelList.itemChanged.connect(self.labelItemChanged)
self.labelList.itemDropped.connect(self.labelOrderChanged)
self.shape_dock = QtWidgets.QDockWidget(
self.tr("Polygon Labels"), self
)
self.shape_dock.setObjectName("Labels")
self.shape_dock.setWidget(self.labelList)
self.uniqLabelList = UniqueLabelQListWidget()
self.uniqLabelList.setToolTip(
self.tr(
"Select label to start annotating for it. "
"Press 'Esc' to deselect."
)
)
if self._config["labels"]:
for label in self._config["labels"]:
item = self.uniqLabelList.createItemFromLabel(label)
self.uniqLabelList.addItem(item)
rgb = self._get_rgb_by_label(label)
self.uniqLabelList.setItemLabel(item, label, rgb)
self.label_dock = QtWidgets.QDockWidget(self.tr(u"Label List"), self)
self.label_dock.setObjectName(u"Label List")
self.label_dock.setWidget(self.uniqLabelList)
self.fileSearch = QtWidgets.QLineEdit()
self.fileSearch.setPlaceholderText(self.tr("Search Filename"))
self.fileSearch.textChanged.connect(self.fileSearchChanged)
self.fileListWidget = QtWidgets.QListWidget()
self.fileListWidget.itemSelectionChanged.connect(
self.fileSelectionChanged
)
fileListLayout = QtWidgets.QVBoxLayout()
fileListLayout.setContentsMargins(0, 0, 0, 0)
fileListLayout.setSpacing(0)
fileListLayout.addWidget(self.fileSearch)
fileListLayout.addWidget(self.fileListWidget)
self.file_dock = QtWidgets.QDockWidget(self.tr(u"File List"), self)
self.file_dock.setObjectName(u"Files")
fileListWidget = QtWidgets.QWidget()
fileListWidget.setLayout(fileListLayout)
self.file_dock.setWidget(fileListWidget)
self.vis_dock = QtWidgets.QDockWidget(
self.tr(u"Visualization Options"), self)
self.vis_dock.setObjectName(u"Visualization Options")
self.vis_widget = QtWidgets.QWidget()
self.vis_dock.setWidget(self.vis_widget)
self.zoomWidget = ZoomWidget()
self.setAcceptDrops(True)
self.canvas = self.labelList.canvas = Canvas(
epsilon=self._config["epsilon"],
double_click=self._config["canvas"]["double_click"],
num_backups=self._config["canvas"]["num_backups"],
)
self.canvas.zoomRequest.connect(self.zoomRequest)
scrollArea = QtWidgets.QScrollArea()
scrollArea.setWidget(self.canvas)
scrollArea.setWidgetResizable(True)
self.scrollBars = {
Qt.Orientation.Vertical: scrollArea.verticalScrollBar(),
Qt.Orientation.Horizontal: scrollArea.horizontalScrollBar(),
Qt.Orientation.Horizontal.value: scrollArea.horizontalScrollBar(),
Qt.Orientation.Vertical.value: scrollArea.verticalScrollBar(),
}
self.canvas.scrollRequest.connect(self.scrollRequest)
self.canvas.newShape.connect(self.newShape)
self.canvas.shapeMoved.connect(self.setDirty)
self.canvas.selectionChanged.connect(self.shapeSelectionChanged)
self.canvas.drawingPolygon.connect(self.toggleDrawingSensitive)
self.canvas.edgeSelected.connect(self.canvasShapeEdgeSelected)
self.canvas.APPrefresh.connect(self.refresh_image_MODE)
# adding toolbars of SAM and and related slots
self.addSamControls()
# Canvas SAM slots
self.canvas.pointAdded.connect(self.run_sam_model)
self.canvas.samFinish.connect(self.sam_finish_annotation_button_clicked)
# SAM predictor
self.sam_predictor = None
self.current_sam_shape = None
self.SAM_SHAPES_IN_IMAGE = []
self.sam_last_mode = "rectangle"
self.setCentralWidget(scrollArea)
# for Export
self.target_directory = ""
self.save_path = ""
self.global_listObj = []
# for merge
self.multi_model_flag = False
# adding toolbars of video mode and and related slots
self.addVideoControls()
# for video annotation and tracking
self.frame_time = 0
self.FRAMES_TO_SKIP = 30
self.TRACK_ASSIGNED_OBJECTS_ONLY = False
self.TrackingMode = False
self.current_annotation_mode = ""
self.CURRENT_ANNOATAION_FLAGS = {"traj": False,
"bbox": True,
"id": True,
"class": True,
"mask": True,
"polygons": True,
"conf": True}
self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30,
'alpha': 0.70}
self.CURRENT_SHAPES_IN_IMG = []
self.featuresOptions = {'deleteDefault': "this frame only",
'interpolationDefMethod': "linear",
'interpolationDefType': "all",
'interpolationOverwrite': False,
'EditDefault': "Edit only this frame"}
self.key_frames = {}
self.id_frames_rec = {}
self.copiedShapes = []
self.INDEX_OF_CURRENT_FRAME = 1
self.interrupted = False
self.minID = -2
self.maxID = 0
for dock in ["label_dock", "shape_dock", "file_dock", "vis_dock"]:
if self._config[dock]["closable"]:
getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetClosable)
if self._config[dock]["floatable"]:
getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetFloatable)
if self._config[dock]["movable"]:
getattr(self, dock).setFeatures(QtWidgets.QDockWidget.DockWidgetFeature.DockWidgetMovable)
if self._config[dock]["show"] is False:
getattr(self, dock).setVisible(False)
self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.label_dock)
self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.shape_dock)
self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.file_dock)
self.addDockWidget(Qt.DockWidgetArea.RightDockWidgetArea, self.vis_dock)
# Actions
action = functools.partial(utils.newAction, self)
shortcuts = self._config["shortcuts"]
quit = action(
self.tr("&Quit"),
self.close,
shortcuts["quit"],
"quit",
self.tr("Quit application"),
)
open_ = action(
self.tr("&Open Image"),
self.openFile,
shortcuts["open"],
"open",
self.tr(f"Open image or label file ({str(shortcuts['open'])})"),
)
opendir = action(
self.tr("&Open Dir"),
self.openDirDialog,
shortcuts["open_dir"],
"opendir",
self.tr(f"Open Dir ({str(shortcuts['open_dir'])})"),
)
save = action(
self.tr("&Save"),
self.saveFile,
shortcuts["save"],
"save",
self.tr(f"Save labels to file ({str(shortcuts['save'])})"),
enabled=False,
)
export = action(
self.tr("&Export"),
self.exportData,
shortcuts["export"],
"export",
self.tr(
f"Export annotations to COCO format ({str(shortcuts['export'])})"),
enabled=False,
)
modelExplorer = action(
self.tr("&Model Explorer"),
self.model_explorer,
None,
"checklist",
self.tr(u"Model Explorer"),
)
saveAs = action(
self.tr("&Save As"),
self.saveFileAs,
shortcuts["save_as"],
"save-as",
self.tr("Save labels to a different file"),
enabled=False,
)
deleteFile = action(
self.tr("&Delete File"),
self.deleteFile,
shortcuts["delete_file"],
"delete",
self.tr("Delete current label file"),
enabled=False,
)
changeOutputDir = action(
self.tr("&Change Output Dir"),
slot=self.changeOutputDirDialog,
shortcut=shortcuts["save_to"],
icon="open",
tip=self.tr(u"Change where annotations are loaded/saved"),
)
saveAuto = action(
text=self.tr("Save &Automatically"),
slot=lambda x: self.actions.saveAuto.setChecked(x),
icon="save",
tip=self.tr("Save automatically"),
checkable=True,
enabled=True,
)
saveAuto.setChecked(self._config["auto_save"])
saveWithImageData = action(
text="Save With Image Data",
slot=self.enableSaveImageWithData,
tip="Save image data in label file",
checkable=True,
checked=self._config["store_data"],
)
close = action(
"&Close",
self.closeFile,
shortcuts["close"],
"close",
"Close current file",
)
toggle_keep_prev_mode = action(
self.tr("Keep Previous Annotation"),
self.toggleKeepPrevMode,
shortcuts["toggle_keep_prev_mode"],
None,
self.tr('Toggle "keep pevious annotation" mode'),
checkable=True,
)
toggle_keep_prev_mode.setChecked(self._config["keep_prev"])
createMode = action(
self.tr("Create Polygons"),
self.setCreateMode,
shortcuts["create_polygon"],
"objects",
self.tr("Start drawing polygons"),
enabled=False,
)
editMode = action(
self.tr("Edit Polygons"),
self.setEditMode,
shortcuts["edit_polygon"],
"edit",
self.tr("Move and edit the selected polygons"),
enabled=False,
)
delete = action(
self.tr("Delete Polygons"),
self.deleteSelectedShape,
shortcuts["delete_polygon"],
"close",
self.tr("Delete the selected polygons"),
enabled=False,
)
copy = action(
self.tr("Duplicate Polygons"),
self.copySelectedShape,
shortcuts["duplicate_polygon"],
"copy",
self.tr("Create a duplicate of the selected polygons"),
enabled=False,
)
undoLastPoint = action(
self.tr("Undo last point"),
self.canvas.undoLastPoint,
shortcuts["undo_last_point"],
"undo",
self.tr("Undo last drawn point"),
enabled=False,
)
addPointToEdge = action(
text=self.tr("Add Point to Edge"),
slot=self.canvas.addPointToEdge,
shortcut=shortcuts["add_point_to_edge"],
icon="add_point",
tip=self.tr("Add point to the nearest edge"),
enabled=False,
)
removePoint = action(
text="Remove Selected Point",
slot=self.removeSelectedPoint,
icon="edit",
tip="Remove selected point from polygon",
enabled=False,
)
undo = action(
self.tr("Undo"),
self.undoShapeEdit,
shortcuts["undo"],
"undo",
self.tr("Undo last add and edit of shape"),
enabled=False,
)
hideAll = action(
self.tr("&Hide\nPolygons"),
functools.partial(self.togglePolygons, False),
icon="eye",
tip=self.tr("Hide all polygons"),
enabled=False,
)
showAll = action(
self.tr("&Show\nPolygons"),
functools.partial(self.togglePolygons, True),
icon="eye",
tip=self.tr("Show all polygons"),
enabled=False,
)
zoom = QtWidgets.QWidgetAction(self)
zoom.setDefaultWidget(self.zoomWidget)
self.zoomWidget.setWhatsThis(
self.tr(
"Zoom in or out of the image. Also accessible with "
"{} and {} from the canvas."
).format(
utils.fmtShortcut(
"{},{}".format(shortcuts["zoom_in"], shortcuts["zoom_out"])
),
utils.fmtShortcut(self.tr("Ctrl+Wheel")),
)
)
self.zoomWidget.setEnabled(False)
zoomIn = action(
self.tr("Zoom &In"),
functools.partial(self.addZoom, 1.1),
shortcuts["zoom_in"],
"zoom-in",
self.tr("Increase zoom level"),
enabled=False,
)
zoomOut = action(
self.tr("&Zoom Out"),
functools.partial(self.addZoom, 0.9),
shortcuts["zoom_out"],
"zoom-out",
self.tr("Decrease zoom level"),
enabled=False,
)
zoomOrg = action(
self.tr("&Original size"),
functools.partial(self.setZoom, 100),
shortcuts["zoom_to_original"],
"zoom",
self.tr("Zoom to original size"),
enabled=False,
)
fitWindow = action(
self.tr("&Fit Window"),
self.setFitWindow,
shortcuts["fit_window"],
"fit-window",
self.tr("Zoom follows window size"),
checkable=True,
enabled=False,
)
fitWidth = action(
self.tr("Fit &Width"),
self.setFitWidth,
shortcuts["fit_width"],
"fit-width",
self.tr("Zoom follows window width"),
checkable=True,
enabled=False,
)
brightnessContrast = action(
"&Brightness Contrast",
self.brightnessContrast,
None,
"color",
"Adjust brightness and contrast",
enabled=False,
)
show_cross_line = action(
self.tr("&Toggle Cross Line"),
self.enable_show_cross_line,
tip=self.tr("cross line for mouse position"),
icon="cartesian",
checkable=True,
checked=self._config["show_cross_line"],
enabled=True,
)
# Group zoom controls into a list for easier toggling.
zoomActions = (
self.zoomWidget,
zoomIn,
zoomOut,
zoomOrg,
fitWindow,
fitWidth,
)
self.zoomMode = self.FIT_WINDOW
fitWindow.setChecked(True)
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
edit = action(
self.tr("Edit &Label"),
self.editLabel,
shortcuts["edit_label"],
"label",
self.tr("Modify the label of the selected polygon"),
enabled=False,
)
enhance = action(
self.tr("&Enhace Polygons"),
self.sam_enhance_annotation_button_clicked,
shortcuts["SAM_enhance"],
"SAM",
self.tr("Enhance the selected polygon with AI"),
enabled=True,
)
interpolate = action(
self.tr("&Interpolation Tracking"),
self.interpolateMENU,
shortcuts["interpolate"],
"tracking",
self.tr("Interpolate the selected polygon between to frames to Track it"),
enabled=True,
)
mark_as_key = action(
self.tr("&Mark as key"),
self.mark_as_key,
shortcuts["mark_as_key"],
"mark",
self.tr("Mark this frame as KEY for interpolation"),
enabled=True,
)
remove_all_keyframes = action(
self.tr("&Remove all keyframes"),
self.remove_all_keyframes,
None,
"mark",
self.tr("Remove all keyframes"),
enabled=True,
)
scale = action(
self.tr("&Scale"),
self.scaleMENU,
shortcuts["scale"],
"resize",
self.tr("Scale the selected polygon"),
enabled=True,
)
copyShapes = action(
self.tr("&Copy"),
self.ctrlCopy,
shortcuts["copy"],
"copy",
self.tr("Copy selected polygons"),
enabled=True,
)
pasteShapes = action(
self.tr("&Paste"),
self.ctrlPaste,
shortcuts["paste"],
"paste",
self.tr("paste copied polygons"),
enabled=True,
)
update_curr_frame = action(
self.tr("&Update current frame"),
self.update_current_frame_annotation_button_clicked,
None,
"done",
self.tr("Update frame"),
enabled=True,
)
ignore_changes = action(
self.tr("&Ignore changes"),
self.main_video_frames_slider_changed,
shortcuts["ignore_updates"],
"delete",
self.tr("Ignore unsaved changes"),
enabled=True,
)
fill_drawing = action(
self.tr("Fill Drawing Polygon"),
self.canvas.setFillDrawing,
None,
"color",
self.tr("Fill polygon while drawing"),
checkable=True,
enabled=True,
)
fill_drawing.trigger()
# intelligence actions
annotate_one_action = action(
self.tr("Run Model on Current Image"),
self.annotate_one,
None,
"open",
self.tr("Run Model on Current Image")
)
annotate_batch_action = action(
self.tr("Run Model on All Images"),
self.annotate_batch,
None,
"file",
self.tr("Run Model on All Images")
)
set_conf_threshold = action(
self.tr("Confidence Threshold"),
self.setConfThreshold,
None,
"tune",
self.tr("Confidence Threshold")
)
set_iou_threshold = action(
self.tr("IOU Threshold (NMS)"),
self.setIOUThreshold,
None,
"iou",
self.tr("IOU Threshold (Non Maximum Suppression)")
)
select_classes = action(
self.tr("Select Classes"),
self.selectClasses,
None,
"checklist",
self.tr("Select Classes to be Annotated")
)
merge_segmentation_models = action(
self.tr("Merge Segmentation Models"),
self.mergeSegModels,
None,
"merge",
self.tr("Merge Segmentation Models")
)
runtime_data = action(
self.tr("Show Runtime Data"),
runtime_data_UI.PopUp,
None,
"runtime",
self.tr("Show Runtime Data")
)
git_hub = action(
self.tr("GitHub Repository"),
links.open_git_hub,
None,
"github",
self.tr("GitHub Repository")
)
feedback = action(
self.tr("Feedback"),
feedback_UI.PopUp,
None,
"feedback",
self.tr("Feedback")
)
license = action(
self.tr("license"),
links.open_license,
None,
"license",
self.tr("license")
)
user_guide = action(
self.tr("User Guide"),
links.open_guide,
None,
"guide",
self.tr("User Guide")
)
check_updates = action(
self.tr("Check for Updates"),
check_updates_UI.PopUp,
None,
"info",
self.tr("Check for Updates")
)
preferences = action(
self.tr("Preferences"),
preferences_UI.PopUp,
None,
"settings",
self.tr("Preferences")
)
shortcut_selector = action(
self.tr("Shortcuts"),
shortcut_selector_UI.PopUp,
None,
"shortcuts",
self.tr("Shortcuts")
)
sam = action(
self.tr("Toggle SAM Toolbar"),
self.Segment_anything,
None,
"SAM",
self.tr("Toggle SAM Toolbar")
)
openVideo = action(
self.tr("Open &Video"),
self.openVideo,
shortcuts["open_video"],
"video",
self.tr(f"Open a video file ({shortcuts['open_video']})"),
)
openVideoFrames = action(
self.tr("Open Video as Frames"),
self.openVideoFrames,
shortcuts["open_video_frames"],
"frames",
self.tr(
f"Open Video as Frames ({shortcuts['open_video_frames']})"),
)
# Lavel list context menu.
labelmenu = QtWidgets.QMenu()
utils.addActions(labelmenu, (edit, delete))
self.labelList.setContextMenuPolicy(Qt.ContextMenuPolicy.CustomContextMenu)
self.labelList.customContextMenuRequested.connect(
self.popLabelListMenu
)
# Store actions for further handling.
self.actions = utils.struct(
saveAuto=saveAuto,
saveWithImageData=saveWithImageData,
changeOutputDir=changeOutputDir,
save=save,
saveAs=saveAs,
open=open_,
close=close,
deleteFile=deleteFile,
toggleKeepPrevMode=toggle_keep_prev_mode,
delete=delete,
edit=edit,
copy=copy,
undoLastPoint=undoLastPoint,
undo=undo,
addPointToEdge=addPointToEdge,
removePoint=removePoint,
createMode=createMode,
editMode=editMode,
zoom=zoom,
zoomIn=zoomIn,
zoomOut=zoomOut,
zoomOrg=zoomOrg,
fitWindow=fitWindow,
fitWidth=fitWidth,
brightnessContrast=brightnessContrast,
show_cross_line=show_cross_line,
zoomActions=zoomActions,
export=export,
openVideo=openVideo,
openVideoFrames=openVideoFrames,
fileMenuActions=(open_, opendir, save, saveAs, close, quit),
modelExplorer=modelExplorer,
runtime_data=runtime_data,
tool=(),
# XXX: need to add some actions here to activate the shortcut
editMenu=(
edit,
copy,
delete,
None,
undo,
undoLastPoint,
None,
addPointToEdge,
),
# menu shown at right click
menu=(
createMode,
editMode,
edit,
enhance,
interpolate,
mark_as_key,
remove_all_keyframes,
scale,
copyShapes,
pasteShapes,
copy,
delete,
undo,
undoLastPoint,
addPointToEdge,
removePoint,
update_curr_frame,
ignore_changes
),
onLoadActive=(
close,
createMode,
editMode,
brightnessContrast,
),
onShapesPresent=(saveAs, hideAll, showAll),
)
self.canvas.vertexSelected.connect(self.actions.removePoint.setEnabled)
self.menus = utils.struct(
file=self.menu(self.tr("&File")),
edit=self.menu(self.tr("&Edit")),
view=self.menu(self.tr("&View")),
intelligence=self.menu(self.tr("&Auto Annotation")),
model_selection=self.menu(self.tr("&Model Selection")),
options=self.menu(self.tr("&Options")),
help=self.menu(self.tr("&Help")),
recentFiles=QtWidgets.QMenu(self.tr("Open &Recent")),
saved_models=QtWidgets.QMenu(self.tr("Select Segmentation model")),
tracking_models=QtWidgets.QMenu(self.tr("Select Tracking model")),
labelList=labelmenu,
certain_area=QtWidgets.QMenu(self.tr("Select Certain Area")),
ui_elements=QtWidgets.QMenu(self.tr("&Show UI Elements")),
zoom_options=QtWidgets.QMenu(self.tr("&Zoom Options")),
)
utils.addActions(
self.menus.file,
(
open_,
opendir,
openVideo,
openVideoFrames,
None,
save,
saveAs,
export,
None,
close,
quit,
),
)
utils.addActions(self.menus.intelligence,
(annotate_one_action,
annotate_batch_action,
)
)
# View menu and its submenus
self.menus.ui_elements.setIcon(QtGui.QIcon("labelme/icons/UI.png"))
utils.addActions(self.menus.ui_elements,
(
self.vis_dock.toggleViewAction(),
self.label_dock.toggleViewAction(),
self.shape_dock.toggleViewAction(),
self.file_dock.toggleViewAction(),
)
)
self.menus.zoom_options.setIcon(QtGui.QIcon("labelme/icons/zoom.png"))
utils.addActions(self.menus.zoom_options,
(
zoomIn,
zoomOut,
zoomOrg,
None,
fitWindow,
fitWidth,
)
)
utils.addActions(
self.menus.view,
(sam,
self.menus.ui_elements,
None,
hideAll,
showAll,
None,
self.menus.zoom_options,
None,
show_cross_line,
),
)
# Model selection menu
self.menus.saved_models.setIcon(
QtGui.QIcon("labelme/icons/brain.png"))
self.menus.tracking_models.setIcon(
QtGui.QIcon("labelme/icons/tracking.png"))
self.menus.certain_area.setIcon(
QtGui.QIcon("labelme/icons/polygon.png"))
utils.addActions(
self.menus.model_selection,
(
self.menus.saved_models,
merge_segmentation_models,
None,
self.menus.tracking_models,
None,
modelExplorer,
),
)
# Options menu
utils.addActions(
self.menus.options,
(
set_conf_threshold,
set_iou_threshold,
self.menus.certain_area,
None,
select_classes,
),
)
# Help menu
utils.addActions(
self.menus.help,
(
user_guide,
preferences,
shortcut_selector,
None,
git_hub,
feedback,
None,
runtime_data,
None,
license,
check_updates
),
)
self.menus.file.aboutToShow.connect(self.updateFileMenu)
self.menus.file.aboutToShow.connect(self.update_models_menu)
# Custom context menu for the canvas widget:
utils.addActions(self.canvas.menus[0], self.actions.menu)
utils.addActions(
self.canvas.menus[1],
(
action("&Copy here", self.copyShape),
action("&Move here", self.moveShape),
),
)
self.tools = self.toolbar("Tools")
# Menu buttons on Left
self.actions.tool = (
open_,
opendir,
openVideo,
None,
save,
export,
None,
createMode,
editMode,
edit,
None,
delete,
undo,
None,
)
self.statusBar().showMessage(self.tr("%s started.") % __appname__)
self.statusBar().show()
if output_file is not None and self._config["auto_save"]:
logger.warn(
"If `auto_save` argument is True, `output_file` argument "
"is ignored and output filename is automatically "
"set as IMAGE_BASENAME.json."
)
self.output_file = output_file
self.output_dir = output_dir
# Application state.
self.image = QtGui.QImage()
self.imagePath = None
self.recentFiles = []
self.maxRecent = 7
self.otherData = None
self.zoom_level = 100
self.fit_window = False
self.zoom_values = {} # key=filename, value=(zoom_mode, zoom_value)
self.brightnessContrast_values = {}
self.scroll_values = {
Qt.Orientation.Horizontal: {},
Qt.Orientation.Vertical: {},
Qt.Orientation.Horizontal.value: {},
Qt.Orientation.Vertical.value: {},
} # key=filename, value=scroll_value
if filename is not None and osp.isdir(filename):
self.importDirImages(filename, load=False)
else:
self.filename = filename
if config["file_search"]:
self.fileSearch.setText(config["file_search"])
self.fileSearchChanged()
# XXX: Could be completely declarative.
# Restore application settings.
self.settings = QtCore.QSettings("labelme", "labelme")
# FIXME: QSettings.value can return None on PyQt4
self.recentFiles = self.settings.value("recentFiles", []) or []
size = self.settings.value("window/size", QtCore.QSize(600, 500))
position = self.settings.value("window/position", QtCore.QPoint(0, 0))
self.resize(size)
self.move(position)
# or simply:
# self.restoreGeometry(settings['window/geometry']
self.restoreState(
self.settings.value("window/state", QtCore.QByteArray())
)
# Populate the File menu dynamically.
self.updateFileMenu()
self.update_models_menu()
# Since loading the file may take some time,
# make sure it runs in the background.
if self.filename is not None:
self.queueEvent(functools.partial(self.loadFile, self.filename))
# Callbacks:
self.zoomWidget.valueChanged.connect(self.paintCanvas)
self.populateModeActions()
self.right_click_menu()
QtGui.QShortcut(QtGui.QKeySequence(self._config['shortcuts']['stop']), self).activated.connect(self.Escape_clicked)
def menu(self, title, actions=None):
    """Create a top-level menu named *title* and optionally populate it.

    Returns the newly created QMenu.
    """
    new_menu = self.menuBar().addMenu(title)
    if actions:
        utils.addActions(new_menu, actions)
    return new_menu
def toolbar(self, title, actions=None):
    """Create a ToolBar named after *title*, dock it on the left and return it."""
    bar = ToolBar(title)
    bar.setObjectName("%sToolBar" % title)
    bar.setToolButtonStyle(Qt.ToolButtonStyle.ToolButtonTextUnderIcon)
    if actions:
        utils.addActions(bar, actions)
    self.addToolBar(Qt.ToolBarArea.LeftToolBarArea, bar)
    return bar
# Support Functions
def noShapes(self):
    """Return True when the label list contains no shapes."""
    return len(self.labelList) == 0
def populateModeActions(self):
    """Rebuild the tool bar, the canvas context menu and the Edit menu."""
    tool_actions, menu_actions = self.actions.tool, self.actions.menu
    self.tools.clear()
    utils.addActions(self.tools, tool_actions)
    self.canvas.menus[0].clear()
    utils.addActions(self.canvas.menus[0], menu_actions)
    self.menus.edit.clear()
    utils.addActions(
        self.menus.edit, (self.actions.editMode,) + self.actions.editMenu
    )
def setDirty(self):
    """Mark the current document as modified.

    With auto-save enabled the labels are written to disk immediately and
    the dirty flag is left untouched; otherwise the Save action is enabled
    and the window title gains a '*' marker.
    """
    # Even if we autosave the file, we keep the ability to undo
    self.actions.undo.setEnabled(self.canvas.isShapeRestorable)
    if self._config["auto_save"] or self.actions.saveAuto.isChecked():
        # FIX: label_file was previously used without ever being assigned,
        # raising NameError whenever auto-save triggered. Derive it from
        # the current image path (IMAGE_BASENAME.json), as upstream labelme
        # does and as the auto_save warning in this app's startup promises.
        label_file = osp.splitext(self.imagePath)[0] + ".json"
        if self.output_dir:
            label_file_without_path = osp.basename(label_file)
            label_file = osp.join(self.output_dir, label_file_without_path)
        # NOTE(review): os.remove() cannot remove a directory, so this
        # branch raises whenever it actually triggers — confirm whether a
        # file-existence check was intended here.
        if os.path.isdir(label_file):
            os.remove(label_file)
        self.saveLabels(label_file)
        return
    self.dirty = True
    self.actions.save.setEnabled(True)
    title = __appname__
    if self.filename is not None:
        title = "{} - {}*".format(title, self.filename)
    self.setWindowTitle(title)
def setClean(self):
    """Mark the document as saved and sync title and action states."""
    self.dirty = False
    self.actions.save.setEnabled(False)
    self.actions.createMode.setEnabled(True)
    title = __appname__
    if self.filename is not None:
        title = "{} - {}".format(title, self.filename)
    self.setWindowTitle(title)
    # Deleting the label file only makes sense when one exists.
    self.actions.deleteFile.setEnabled(bool(self.hasLabelFile()))
def toggleActions(self, value=True):
    """Enable/Disable widgets which depend on an opened image."""
    for act in tuple(self.actions.zoomActions) + tuple(self.actions.onLoadActive):
        act.setEnabled(value)
def canvasShapeEdgeSelected(self, selected, shape):
    """Enable 'add point to edge' only for a selected shape that accepts points."""
    enable = selected and shape and shape.canAddPoint()
    self.actions.addPointToEdge.setEnabled(enable)
def queueEvent(self, function):
    """Run *function* asynchronously, on the next event-loop iteration."""
    no_delay_ms = 0
    QtCore.QTimer.singleShot(no_delay_ms, function)
def status(self, message, delay=5000):
    """Show *message* in the status bar for *delay* milliseconds."""
    bar = self.statusBar()
    bar.showMessage(message, delay)
def resetState(self):
    """Clear the label list, drop all per-image state and reset the canvas."""
    self.labelList.clear()
    for attr in (
        "filename",
        "imagePath",
        "imageData",
        "CURRENT_FRAME_IMAGE",
        "labelFile",
        "otherData",
    ):
        setattr(self, attr, None)
    self.canvas.resetState()
def currentItem(self):
    """Return the first selected label-list item, or None when none is selected."""
    selected = self.labelList.selectedItems()
    return selected[0] if selected else None
def addRecentFile(self, filename):
    """Move *filename* to the front of the recent-files list.

    The list is bounded by maxRecent; the oldest entry is evicted when full.
    """
    recent = self.recentFiles
    if filename in recent:
        recent.remove(filename)
    elif len(recent) >= self.maxRecent:
        recent.pop()
    recent.insert(0, filename)
# Callbacks
def Escape_clicked(self):
    """Handle the Escape key.

    Interrupts any running annotation process (tracking, interpolation,
    ...), resets the SAM toolbar, and cancels an in-progress
    tracking-area selection on the canvas.
    """
    self.interrupted = True
    self.sam_reset_button_clicked()
    drawing_area = self.canvas.tracking_area == "drawing"
    if drawing_area:
        self.certain_area_clicked(1)
def undoShapeEdit(self):
    """Undo the last shape edit: restore the canvas snapshot, rebuild the list."""
    canvas = self.canvas
    canvas.restoreShape()
    self.labelList.clear()
    self.loadShapes(canvas.shapes)
    self.actions.undo.setEnabled(canvas.isShapeRestorable)
def toggleDrawingSensitive(self, drawing=True):
    """Toggle drawing sensitive.

    In the middle of drawing, toggling between modes should be disabled.
    """
    acts = self.actions
    not_drawing = not drawing
    acts.editMode.setEnabled(not_drawing)
    acts.undoLastPoint.setEnabled(drawing)
    acts.undo.setEnabled(not_drawing)
    acts.delete.setEnabled(not_drawing)
def toggleDrawMode(self, edit=True, createMode="polygon"):
    """Switch the canvas between edit mode and a drawing mode.

    Args:
        edit (bool): True for edit (move/modify) mode, False to draw.
        createMode (str): shape type to draw when *edit* is False.
    """
    self.canvas.setEditing(edit)
    self.canvas.createMode = createMode
    # "Create" stays enabled unless we are already drawing polygons.
    self.actions.createMode.setEnabled(edit or createMode != "polygon")
    self.actions.editMode.setEnabled(not edit)
def setEditMode(self):
    """Leave SAM-assisted drawing and switch the canvas back to edit mode.

    In video mode (CURRENT_VIDEO_PATH exists) the current frame's
    annotation is flushed before switching; in image/dir mode that step
    is skipped.
    """
    self.turnOFF_SAM()
    # FIX: CURRENT_VIDEO_PATH only exists after a video has been opened.
    # The previous bare try/except bound an unused variable and would
    # also have hidden unrelated errors; hasattr expresses the intent.
    if hasattr(self, "CURRENT_VIDEO_PATH"):
        self.update_current_frame_annotation()
    self.toggleDrawMode(True)
def updateFileMenu(self):
    """Rebuild the 'Open Recent' menu from files that still exist on disk."""
    current = self.filename
    menu = self.menus.recentFiles
    menu.clear()
    # Skip the currently open file and anything that no longer exists.
    recent = [
        f for f in self.recentFiles
        if f != current and osp.exists(str(f))
    ]
    for i, f in enumerate(recent):
        icon = utils.newIcon("brain")
        entry = QtGui.QAction(
            icon, "&%d %s" % (i + 1, QtCore.QFileInfo(f).fileName()), self
        )
        entry.triggered.connect(functools.partial(self.loadRecent, f))
        menu.addAction(entry)
def update_models_menu(self):
    """Rebuild the saved-segmentation-models menu (first six entries of
    saved_models.json), then refresh the tracker and certain-area menus."""
    menu = self.menus.saved_models
    menu.clear()
    with open("saved_models.json") as json_file:
        data = json.load(json_file)
    # Show at most the first six saved models.
    for i, model_name in enumerate(list(data.keys())[:6]):
        icon = utils.newIcon("brain")
        entry = QtGui.QAction(
            icon, "&%d %s" % (i + 1, model_name), self)
        entry.triggered.connect(functools.partial(
            self.change_curr_model, model_name))
        menu.addAction(entry)
    self.add_tracking_models_menu()
    self.add_certain_area_menu()
def add_tracking_models_menu(self):
    """Populate the tracking-models menu with the supported trackers."""
    menu2 = self.menus.tracking_models
    menu2.clear()
    # (menu label, internal tracker name) — labels and mapping are kept
    # exactly as before; NOTE(review): "3 Deep SORT" maps to 'deepocsort',
    # confirm that label/method pairing is intentional.
    trackers = (
        ("1 Byte track (DEFAULT)", "bytetrack"),
        ("2 Strong SORT (lowest id switch)", "strongsort"),
        ("3 Deep SORT", "deepocsort"),
        ("4 OC SORT", "ocsort"),
        ("5 BoT SORT", "botsort"),
    )
    for label, method in trackers:
        icon = utils.newIcon("tracking")
        entry = QtGui.QAction(icon, label, self)
        # Same partial-based connection style used by updateFileMenu.
        entry.triggered.connect(
            functools.partial(self.update_tracking_method, method))
        menu2.addAction(entry)
def add_certain_area_menu(self):
    """Populate the 'Select Certain Area' menu (start / cancel selection)."""
    menu3 = self.menus.certain_area
    menu3.clear()
    # (icon, menu label, flag passed to certain_area_clicked)
    entries = (
        ("polygon", "Select Certain Area", 1),
        ("rectangle", "Cancel Area", 0),
    )
    for icon_name, label, flag in entries:
        entry = QtGui.QAction(utils.newIcon(icon_name), label, self)
        # Same partial-based connection style used by updateFileMenu.
        entry.triggered.connect(
            functools.partial(self.certain_area_clicked, flag))
        menu3.addAction(entry)
def update_tracking_method(self, method='bytetrack'):
    """Switch the active multi-object tracker.

    Shows a wait dialog, builds the tracker from its YAML config under
    ROOT/trackers/<method>/configs/, and warms up its ReID model when
    the tracker ships one.

    Args:
        method (str): tracker name, e.g. 'bytetrack', 'strongsort',
            'deepocsort', 'ocsort' or 'botsort'. Defaults to 'bytetrack'.
    """
    self.waitWindow(
        visible=True, text=f'Please Wait.\n{method} is Loading...')
    self.tracking_method = method
    # Config file lives at ROOT/trackers/<method>/configs/<method>.yaml
    self.tracking_config = ROOT / 'trackers' / \
        method / 'configs' / (method + '.yaml')
    with torch.no_grad():
        # '' lets select_device pick the default (GPU if available).
        device = select_device('')
        print(
            f'tracking method {self.tracking_method} , config {self.tracking_config} , reid {reid_weights} , device {device} , half {False}')
        # Half precision is hard-coded off here (last argument False).
        self.tracker = create_tracker(
            self.tracking_method, self.tracking_config, reid_weights, device, False)
        # Warm up the ReID model when the tracker provides one
        # (e.g. StrongSORT), so the first frame is not slow.
        if hasattr(self.tracker, 'model'):
            if hasattr(self.tracker.model, 'warmup'):
                self.tracker.model.warmup()
    # Hide the wait dialog again.
    self.waitWindow()
    print(f'Changed tracking method to {method}')
def popLabelListMenu(self, point):
    """Show the label-list context menu at *point* (widget coordinates)."""
    global_point = self.labelList.mapToGlobal(point)
    self.menus.labelList.exec(global_point)
def validateLabel(self, label):
    """Check *label* against the configured validation rule.

    Returns True when validation is disabled, or when 'exact' matching
    finds the label among the unique labels already known; False otherwise.
    """
    rule = self._config["validate_label"]
    if rule is None:
        # no validation configured
        return True
    for i in range(self.uniqLabelList.count()):
        known = self.uniqLabelList.item(i).data(Qt.ItemDataRole.UserRole)
        if rule in ["exact"] and known == label:
            return True
    return False
def setCreateMode(self):
    """Enable SAM assistance and switch the canvas to polygon-drawing mode."""
    self.turnON_SAM()
    self.toggleDrawMode(False, createMode="polygon")
def editLabel(self, item=None):
    """Edit the label, group id and content of a shape via the label dialog.

    In image/dir mode the edit is applied directly to the shape and the
    unique-label list. In video mode a changed group id is additionally
    propagated through the per-frame JSON records, the id/frame
    bookkeeping and the cached trajectories.

    Args:
        item (LabelListWidgetItem, optional): the list item to edit;
            falls back to the currently selected item.

    Raises:
        TypeError: if *item* is not a LabelListWidgetItem.
    """
    if self.current_annotation_mode == 'video':
        self.update_current_frame_annotation()
    if item and not isinstance(item, LabelListWidgetItem):
        raise TypeError("item must be LabelListWidgetItem type")
    if not self.canvas.editing():
        return
    if not item:
        item = self.currentItem()
    if item is None:
        return
    shape = item.shape()
    if shape is None:
        return
    # First popUp call (skip_flag=True) snapshots the current values so
    # they can be restored if the edit is later rejected.
    # NOTE(review): this relies on the skip_flag variant returning the
    # unchanged values without user interaction — confirm in LabelDialog.
    old_text, old_flags, old_group_id, old_content = self.labelDialog.popUp(
        text=shape.label,
        flags=shape.flags,
        group_id=shape.group_id,
        content=shape.content,
        skip_flag=True
    )
    # Second call actually prompts the user for the new values.
    text, flags, new_group_id, content = self.labelDialog.popUp(
        text=shape.label,
        flags=shape.flags,
        group_id=shape.group_id,
        content=shape.content
    )
    if text is None:
        # dialog was cancelled
        return
    if not self.validateLabel(text):
        self.errorMessage(
            self.tr("Invalid label"),
            self.tr("Invalid label '{}' with validation type '{}'").format(
                text, self._config["validate_label"]
            ),
        )
        return
    shape.label = text
    shape.flags = flags
    shape.group_id = new_group_id
    shape.content = str(content)
    # if img or dir -> do smth then return
    if self.current_annotation_mode == 'img' or self.current_annotation_mode == 'dir':
        item.setText(f'{shape.label}')
        self.setDirty()
        if not self.uniqLabelList.findItemsByLabel(shape.label):
            # first time this label is seen: add it to the unique list
            item = QtWidgets.QListWidgetItem()
            item.setData(Qt.ItemDataRole.UserRole, shape.label)
            self.uniqLabelList.addItem(item)
        self.refresh_image_MODE()
        return
    # now we are in video mode
    if shape.group_id is None:
        item.setText(shape.label)
    else:
        idChanged = old_group_id != new_group_id
        # Ask how the id change should be applied (this frame only vs all
        # frames) and detect duplicate-id conflicts.
        result, self.featuresOptions, only_this_frame, duplicates = editLabel_idChanged_UI(
            self.featuresOptions,
            old_group_id,
            new_group_id,
            self.id_frames_rec,
            self.INDEX_OF_CURRENT_FRAME)
        if duplicates or result != QtWidgets.QDialog.DialogCode.Accepted:
            # Roll the shape back to its previous state on conflict/cancel.
            shape.label = old_text
            shape.flags = old_flags
            shape.content = old_content
            shape.group_id = old_group_id
            return
        self.minID = min(self.minID, new_group_id - 1)
        listObj = self.load_objects_from_json__orjson()
        # Rewrite the stored frames and trajectories to reflect the new id.
        self.id_frames_rec, self.CURRENT_ANNOATAION_TRAJECTORIES, listObj = editLabel_handle_data(
            currFrame=self.INDEX_OF_CURRENT_FRAME,
            listObj=listObj,
            trajectories=self.CURRENT_ANNOATAION_TRAJECTORIES,
            id_frames_rec=self.id_frames_rec,
            idChanged=idChanged,
            only_this_frame=only_this_frame,
            shape=shape,
            old_group_id=old_group_id,
            new_group_id=new_group_id,)
        self.load_objects_to_json__orjson(listObj)
        self.main_video_frames_slider_changed()
def mark_as_key(self):
    """
    Summary:
        This function is called when the user presses the "Mark as Key" button.
        It marks the current frame as a key frame for the selected shape's id;
        if the frame is already a key frame, the user may un-mark it instead.
    """
    try:
        self.update_current_frame_annotation()
        id = self.canvas.selectedShapes[0].group_id
        try:
            # Inner try: a KeyError here means this id has no key-frame
            # set yet — handled below by creating one.
            if self.INDEX_OF_CURRENT_FRAME not in self.key_frames['id_' + str(id)]:
                self.key_frames['id_' +
                                str(id)].add(self.INDEX_OF_CURRENT_FRAME)
            else:
                # Frame already marked: offer to remove it instead.
                res = MsgBox.OKmsgBox(
                    "Caution", f"Frame {self.INDEX_OF_CURRENT_FRAME} is already a key frame for ID {id}.\nDo you want to remove it?", "warning", turnResult=True)
                if res == QtWidgets.QMessageBox.StandardButton.Ok:
                    self.key_frames['id_' +
                                    str(id)].remove(self.INDEX_OF_CURRENT_FRAME)
                else:
                    return
        except:
            # First key frame for this id: create its set and record it.
            self.key_frames['id_' +
                            str(id)] = set()
            self.key_frames['id_' +
                            str(id)].add(self.INDEX_OF_CURRENT_FRAME)
        self.main_video_frames_slider_changed()
    except Exception as e:
        # No shape selected (IndexError) or no video context: report it.
        MsgBox.OKmsgBox("Error", f"Error: {e}", "critical")
def remove_all_keyframes(self):
    """Clear every key frame recorded for the currently selected shape's id."""
    try:
        self.update_current_frame_annotation()
        selected_id = self.canvas.selectedShapes[0].group_id
        self.key_frames['id_' + str(selected_id)] = set()
    except:
        # No selection or no video context: silently do nothing, matching
        # the other key-frame helpers.
        pass
def rec_frame_for_id(self, id, frame, type_='add'):
    """
    Summary:
        To store the frames in which the object with the given id is present.

    Args:
        id (int): The id of the object.
        frame (int): The frame number.
        type_ (str, optional): 'add' or 'remove'. Defaults to 'add'.
            'add' to add the frame to the list of frames in which the object is present.
            'remove' to remove the frame from the list of frames in which the object is present.

    Returns:
        None
    """
    key = 'id_' + str(id)
    if type_ == 'add':
        # setdefault replaces the previous bare try/except-KeyError dance.
        self.id_frames_rec.setdefault(key, set()).add(frame)
    else:
        # discard() is a no-op when the frame (or the whole id) is
        # unknown, matching the old silent-except behaviour.
        self.id_frames_rec.get(key, set()).discard(frame)
def interpolateMENU(self, item=None):
    """Entry point for the "Interpolate" context-menu action.

    With no shapes selected the user is asked whether to interpolate every
    known id; otherwise the selected shapes' ids are used. Depending on
    the chosen options the work is delegated to interpolate() or
    interpolate_with_sam().

    Args:
        item: unused; kept for the menu-action callback signature.
    """
    try:
        if len(self.canvas.selectedShapes) == 0:
            mb = QtWidgets.QMessageBox
            msg = self.tr("Interpolate all IDs?\n")
            answer = mb.warning(self, self.tr(
                "Attention"), msg, mb.StandardButton.Yes | mb.StandardButton.No)
            if answer != mb.StandardButton.Yes:
                return
            else:
                self.update_current_frame_annotation()
                # keys look like 'id_<n>' — strip the 'id_' prefix.
                keys = list(self.id_frames_rec.keys())
                idsORG = [int(keys[i][3:]) for i in range(len(keys))]
        else:
            self.update_current_frame_annotation()
            idsORG = [shape.group_id for shape in self.canvas.selectedShapes]
            id = self.canvas.selectedShapes[0].group_id
        result, self.featuresOptions = interpolation_UI.PopUp(self.featuresOptions)
        if result != QtWidgets.QDialog.DialogCode.Accepted:
            return
        # NOTE(review): with_linear is computed but never used below.
        with_linear = True if self.featuresOptions['interpolationDefMethod'] == 'linear' else False
        with_sam = True if self.featuresOptions['interpolationDefMethod'] == 'SAM' else False
        with_keyframes = True if self.featuresOptions['interpolationDefType'] == 'key' else False
        if with_keyframes:
            # Ids need at least two key frames to be interpolatable.
            allAccepted, allRejected, ids = mathOps.checkKeyFrames(
                idsORG, self.key_frames)
            if not allAccepted:
                if allRejected:
                    MsgBox.OKmsgBox("Key Frames Error",
                                    f"All of the selected IDs have no KEY frames.\n ie. less than 2 key frames\n The interpolation is NOT performed.")
                    return
                else:
                    resutl = MsgBox.OKmsgBox("Key Frames Error",
                                             f"Some of the selected IDs have no KEY frames.\n ie. less than 2 key frames\n The interpolation is performed only for the IDs with KEY frames.\nIDs: {ids}.", "info", turnResult=True)
                    if resutl != QtWidgets.QMessageBox.StandardButton.Ok:
                        return
        else:
            ids = idsORG
        self.interrupted = False
        if with_sam:
            self.interpolate_with_sam(ids, with_keyframes)
        else:
            for id in ids:
                # Keep the UI responsive and honour Escape (interrupted).
                QtWidgets.QApplication.processEvents()
                if self.interrupted:
                    self.interrupted = False
                    break
                self.interpolate(id=id,
                                 only_edited=with_keyframes)
        self.waitWindow()
    except Exception as e:
        MsgBox.OKmsgBox("Error", f"Error: {e}", "critical")
def interpolate(self, id, only_edited=False):
    """
    Summary:
        This function is called when the user presses the "Interpolate" button.
        It interpolates the object with the given id: missing frames
        between the first and last known occurrence are filled with
        records interpolated from the surrounding known ones. In
        key-frame mode only key frames are treated as known and non-key
        records are discarded first.

    Args:
        id (int): The id of the object.
        only_edited (bool, optional): True to interpolate using only the key frames. Defaults to False.
    """
    self.waitWindow(
        visible=True, text=f'Please Wait.\nID {id} is being interpolated...')
    listObj = self.load_objects_from_json__orjson()
    if only_edited:
        try:
            FRAMES = list(self.key_frames['id_' + str(id)])
        except:
            # this id has no key frames at all
            return
    else:
        # [-1] marks "nothing to do" when the id appears in < 2 frames.
        FRAMES = list(self.id_frames_rec['id_' + str(id)]) if len(
            self.id_frames_rec['id_' + str(id)]) > 1 else [-1]
    first_frame_idx = min(FRAMES)
    last_frame_idx = max(FRAMES)
    if (first_frame_idx >= last_frame_idx):
        return
    # records[k] holds this id's record at frame first_frame_idx + k,
    # or None where it is absent and must be interpolated.
    records = [None for i in range(first_frame_idx - 1, last_frame_idx, 1)]
    for frame in range(first_frame_idx, last_frame_idx + 1, 1):
        listobjframe = listObj[frame - 1]['frame_idx']
        frameobjects = listObj[frame - 1]['frame_data']
        for object_ in frameobjects:
            if (object_['tracker_id'] == id):
                if ((not only_edited) or (listobjframe in FRAMES)):
                    records[frame -
                            first_frame_idx] = copy.deepcopy(object_)
                break
    baseObject = None        # last known record at or before the current frame
    baseObjectFrame = None
    nextObject = None        # next known record after the base
    nextObjectFrame = None
    for frame in range(first_frame_idx, last_frame_idx, 1):
        QtWidgets.QApplication.processEvents()
        if self.interrupted:
            break
        listobjframe = listObj[frame - 1]['frame_idx']
        frameobjects = listObj[frame - 1]['frame_data']
        # if object is present in this frame, then it is base object and we calculate next object
        if (records[frame - first_frame_idx] is not None):
            # assign it as base object
            baseObject = copy.deepcopy(records[frame - first_frame_idx])
            baseObjectFrame = frame
            # find next object
            for j in range(frame + 1, last_frame_idx + 1, 1):
                if (records[j - first_frame_idx] != None):
                    nextObject = copy.deepcopy(
                        records[j - first_frame_idx])
                    nextObjectFrame = j
                    break
            # job done, go to next frame
            continue
        # if only_edited is true and the frame is not key, then we remove the object from the frame to be interpolated
        if (only_edited and (frame not in FRAMES)):
            for object_ in frameobjects:
                if (object_['tracker_id'] == id):
                    listObj[frame - 1]['frame_data'].remove(object_)
                    break
        # if object is not present in this frame, then we calculate the object for this frame
        cur = mathOps.getInterpolated(baseObject=baseObject,
                                      baseObjectFrame=baseObjectFrame,
                                      nextObject=nextObject,
                                      nextObjectFrame=nextObjectFrame,
                                      curFrame=frame,)
        listObj[frame - 1]['frame_data'].append(cur)
        self.rec_frame_for_id(id, frame)
    # Persist the updated records and refresh trajectories/UI.
    self.load_objects_to_json__orjson(listObj)
    frames = range(first_frame_idx - 1, last_frame_idx, 1)
    self.calculate_trajectories(frames)
    self.main_video_frames_slider_changed()
def interpolate_with_sam(self, idsLISTX, only_edited=False):
    """
    Summary:
        This function is called when the user chooses the "Interpolate with SAM".
        It interpolates and inhance the objects with the given ids using SAM:
        missing bboxes are linearly interpolated between known records and
        every bbox/segment is then refined by the SAM predictor.

    Args:
        idsLISTX (list): The list of ids of the objects.
        only_edited (bool, optional): True to span only each id's key
            frames instead of its full recorded range. Defaults to False.
    """
    self.waitWindow(
        visible=True, text=f'Please Wait.\nIDs are being interpolated with SAM...')
    if self.sam_model_comboBox.currentText() == "Select Model (SAM disabled)":
        MsgBox.OKmsgBox("SAM is disabled",
                        f"SAM is disabled.\nPlease enable SAM.")
        return
    # Keep only ids that actually span more than one frame, remembering
    # each id's first and last frame.
    idsLIST = []
    first_frame_idxLIST = []
    last_frame_idxLIST = []
    for id in idsLISTX:
        try:
            if only_edited:
                [minf, maxf] = [min(
                    self.key_frames['id_' + str(id)]), max(self.key_frames['id_' + str(id)])]
            else:
                [minf, maxf] = [min(
                    self.id_frames_rec['id_' + str(id)]), max(self.id_frames_rec['id_' + str(id)])]
        except:
            # id unknown (no recorded/key frames) — skip it
            continue
        if minf == maxf:
            continue
        first_frame_idxLIST.append(minf)
        last_frame_idxLIST.append(maxf)
        idsLIST.append(id)
    if len(idsLIST) == 0:
        return
    overwrite = self.featuresOptions['interpolationOverwrite']
    listObj = self.load_objects_from_json__orjson()
    listObjNEW = copy.deepcopy(listObj)
    # recordsLIST[i][k]: record of idsLIST[i] at its k-th spanned frame.
    recordsLIST = [[None for ii in range(
        first_frame_idxLIST[i], last_frame_idxLIST[i] + 1)] for i in range(len(idsLIST))]
    # Pull every existing record of the processed ids out of listObj;
    # they are re-appended (possibly refined) frame by frame below.
    for i in range(min(first_frame_idxLIST) - 1, max(last_frame_idxLIST), 1):
        self.waitWindow(visible=True)
        listobjframe = listObj[i]['frame_idx']
        frameobjects = listObj[i]['frame_data'].copy()
        for object_ in frameobjects:
            if (object_['tracker_id'] in idsLIST):
                index = idsLIST.index(object_['tracker_id'])
                recordsLIST[index][listobjframe -
                                   first_frame_idxLIST[index]] = copy.deepcopy(object_)
                listObj[i]['frame_data'].remove(object_)
    for frameIDX in range(min(first_frame_idxLIST), max(last_frame_idxLIST) + 1):
        # Keep the UI responsive and honour Escape (interrupted).
        QtWidgets.QApplication.processEvents()
        if self.interrupted:
            self.interrupted = False
            break
        self.waitWindow(
            visible=True, text=f'Please Wait.\nIDs are being interpolated with SAM...\nFrame {frameIDX}')
        frameIMAGE = self.get_frame_by_idx(frameIDX)
        for ididx in range(len(idsLIST)):
            # i: offset of this frame within the id's spanned range.
            i = frameIDX - first_frame_idxLIST[ididx]
            self.waitWindow(visible=True)
            if frameIDX < first_frame_idxLIST[ididx] or frameIDX > last_frame_idxLIST[ididx]:
                continue
            records = recordsLIST[ididx]
            if (records[i] != None):
                # Existing record: keep it as-is unless overwriting with SAM.
                current = copy.deepcopy(records[i])
                cur_bbox = current['bbox']
                if not overwrite:
                    listObj[frameIDX - 1]['frame_data'].append(current)
                    continue
            else:
                # Missing record: linearly interpolate the bbox between the
                # previous and the next known records.
                prev_idx = i - 1
                current = copy.deepcopy(records[i - 1])
                next_idx = i + 1
                for j in range(i + 1, len(records)):
                    self.waitWindow(visible=True)
                    if (records[j] != None):
                        next_idx = j
                        break
                cur_bbox = ((next_idx - i) / (next_idx - prev_idx)) * np.array(records[prev_idx]['bbox']) + (
                    (i - prev_idx) / (next_idx - prev_idx)) * np.array(records[next_idx]['bbox'])
                cur_bbox = [int(cur_bbox[i]) for i in range(len(cur_bbox))]
                current['bbox'] = copy.deepcopy(cur_bbox)
                records[i] = current
            try:
                # Make sure SAM's encoder has this frame loaded.
                same_image = self.sam_predictor.check_image(
                    frameIMAGE)
            except:
                return
            # Let SAM refine the (possibly interpolated) bbox and produce
            # the segment.
            cur_bbox, cur_segment = self.sam_enhanced_bbox_segment(
                frameIMAGE, cur_bbox, 1.2, max_itr=5, forSHAPE=False)
            current['bbox'] = copy.deepcopy(cur_bbox)
            current['segment'] = copy.deepcopy(cur_segment)
            # append the shape frame by frame (cause we already removed it in the prev. for loop)
            listObj[frameIDX - 1]['frame_data'].append(current)
            self.rec_frame_for_id(idsLIST[ididx], frameIDX)
        # update frame by frame to the to-be-uploaded listObj
        listObjNEW[frameIDX - 1] = copy.deepcopy(listObj[frameIDX - 1])
    self.load_objects_to_json__orjson(listObjNEW)
    self.calculate_trajectories(
        range(min(first_frame_idxLIST) - 1, max(last_frame_idxLIST), 1))
    self.main_video_frames_slider_changed()
    # Notify the user that the interpolation is finished
    self._config = get_config()
    if not self._config["mute"]:
        if not self.isActiveWindow():
            notification.PopUp("SAM Interpolation Completed")
def get_frame_by_idx(self, frameIDX):
    """Seek the open capture to *frameIDX* (1-based) and return that frame.

    The OpenCV success flag is intentionally ignored; a failed read
    yields whatever cv2 returned for the image (typically None).
    """
    self.CAP.set(cv2.CAP_PROP_POS_FRAMES, frameIDX - 1)
    _success, frame = self.CAP.read()
    return frame
def scaleMENU(self):
    """Scale the single selected shape through the scale dialog.

    Shows an error when the selection does not contain exactly one shape;
    on cancel the current frame view is refreshed instead.
    """
    selected = self.canvas.selectedShapes
    if len(selected) != 1:
        MsgBox.OKmsgBox(f'Scale error',
                        f'There is {len(selected)} selected shapes. Please select only one shape to scale.')
        return
    accepted = scaleObject_UI.PopUp(self) == QtWidgets.QDialog.DialogCode.Accepted
    if accepted:
        self.update_current_frame_annotation_button_clicked()
    else:
        self.main_video_frames_slider_changed()
def ctrlCopy(self):
    """Deep-copy the currently selected shapes into the paste buffer."""
    selected = self.canvas.selectedShapes
    if len(selected) == 0:
        return
    self.copiedShapes = copy.deepcopy(selected)
def ctrlPaste(self):
    """Paste previously copied shapes into the current frame.

    Shapes whose group id already exists in the frame are skipped; if any
    were skipped the user is informed. In video mode the frame annotation
    is written back afterwards.
    """
    if len(self.copiedShapes) == 0:
        return
    existing_ids = [shape.group_id for shape in self.canvas.shapes]
    skipped_any = False
    for shape in self.copiedShapes:
        if shape.group_id in existing_ids:
            skipped_any = True
            continue
        self.canvas.shapes.append(shape)
        self.addLabel(shape)
        self.rec_frame_for_id(shape.group_id, self.INDEX_OF_CURRENT_FRAME)
    if skipped_any:
        MsgBox.OKmsgBox("IDs already exist",
                        "A Shape(s) with the same ID(s) already exist(s) in this frame.\n\nShapes with no duplicate IDs are Copied Successfully.")
    if self.current_annotation_mode == "video":
        self.update_current_frame_annotation_button_clicked()
def fileSearchChanged(self):
    """Re-filter the directory image list using the current search pattern."""
    search_pattern = self.fileSearch.text()
    self.importDirImages(
        self.lastOpenDir,
        pattern=search_pattern,
        load=False,
    )
def fileSelectionChanged(self):
    """Load the image selected in the file-list dock, if it is safe to
    leave the current one (unsaved changes are checked via mayContinue)."""
    items = self.fileListWidget.selectedItems()
    if not items:
        return
    if not self.mayContinue():
        return
    chosen = items[0]
    idx = self.imageList.index(str(chosen.text()))
    if idx < len(self.imageList):
        target = self.imageList[idx]
        if target:
            self.loadFile(target)
            self.refresh_image_MODE()
# React to canvas signals.
def shapeSelectionChanged(self, selected_shapes):
    """Mirror the canvas selection into the label list and update actions.

    Args:
        selected_shapes (list): shapes now selected on the canvas.
    """
    try:
        # Guard against recursive selection signals while syncing.
        self._noSelectionSlot = True
        for shape in self.canvas.selectedShapes:
            shape.selected = False
        self.labelList.clearSelection()
        self.canvas.selectedShapes = selected_shapes
        for shape in self.canvas.selectedShapes:
            shape.selected = True
            item = self.labelList.findItemByShape(shape)
            self.labelList.selectItem(item)
            self.labelList.scrollToItem(item)
        self._noSelectionSlot = False
        n_selected = len(selected_shapes)
        self.actions.delete.setEnabled(n_selected)
        self.actions.copy.setEnabled(n_selected)
        # Editing only makes sense for exactly one selected shape.
        self.actions.edit.setEnabled(n_selected == 1)
    except Exception:
        # FIX: the exception was previously bound to an unused variable
        # and dropped. Keep the best-effort behaviour (a stale list item
        # must not crash the UI) but drop the dead binding.
        pass
def addLabel(self, shape):
    """Add *shape* to the label list, register its label, and colourise it.

    The list entry shows ' ID <gid>: <label>' in video mode (when the
    shape has a group id) and the bare label otherwise, followed by a
    coloured bullet derived from the label.
    """
    if shape.group_id is None or self.current_annotation_mode != "video":
        text = shape.label
    else:
        text = f' ID {shape.group_id}: {shape.label}'
    label_list_item = LabelListWidgetItem(text, shape)
    self.labelList.addItem(label_list_item)
    if not self.uniqLabelList.findItemsByLabel(shape.label):
        # First occurrence of this label: add it to the unique-label list
        # with its resolved colour.
        item = self.uniqLabelList.createItemFromLabel(shape.label)
        self.uniqLabelList.addItem(item)
        rgb = self._get_rgb_by_label(shape.label)
        self.uniqLabelList.setItemLabel(item, shape.label, rgb)
    self.labelDialog.addLabelHistory(shape.label)
    # Shapes now exist, so enable the shape-dependent actions.
    for action in self.actions.onShapesPresent:
        action.setEnabled(True)
    rgb = self._get_rgb_by_label(shape.label)
    r, g, b = rgb
    # Append a coloured bullet (HTML) to the list entry text.
    label_list_item.setText(
        '{} <font color="#{:02x}{:02x}{:02x}">●</font>'.format(
            text, r, g, b
        )
    )
    # Style the shape itself with the label colour.
    shape.line_color = QtGui.QColor(r, g, b)
    shape.vertex_fill_color = QtGui.QColor(r, g, b)
    shape.hvertex_fill_color = QtGui.QColor(255, 255, 255)
    shape.fill_color = QtGui.QColor(r, g, b, 128)
    shape.select_line_color = QtGui.QColor(255, 255, 255)
    shape.select_fill_color = QtGui.QColor(r, g, b, 155)
def _get_rgb_by_label(self, label):
if self._config["shape_color"] == "auto":
label_ascii = sum([ord(c) for c in label])
idx = label_ascii % len(color_palette)
color = color_palette[idx]
# convert color from bgr to rgb
return color[::-1]
elif (
self._config["shape_color"] == "manual"
and self._config["label_colors"]
and label in self._config["label_colors"]
):
return self._config["label_colors"][label]
elif self._config["default_shape_color"]:
return self._config["default_shape_color"]
def remLabels(self, shapes):
    """Remove the label-list items corresponding to each of *shapes*."""
    for shape in shapes:
        self.labelList.removeItem(self.labelList.findItemByShape(shape))
def loadShapes(self, shapes, replace=True):
    """Add *shapes* to the label list and the canvas, ordered by group id.

    Shapes without a group id sort as 0. Every canvas shape's visibility
    is then synced with the 'polygons' visualisation flag.
    """
    self._noSelectionSlot = True
    ordered = sorted(
        shapes,
        key=lambda sh: 0 if sh.group_id is None else int(sh.group_id),
    )
    for sh in ordered:
        self.addLabel(sh)
    self.labelList.clearSelection()
    self._noSelectionSlot = False
    self.canvas.loadShapes(ordered, replace=replace)
    for sh in self.canvas.shapes:
        self.canvas.setShapeVisible(
            sh, self.CURRENT_ANNOATAION_FLAGS["polygons"])
def loadLabels(self, shapes, replace=True):
    """Build Shape objects from serialized shape dicts and load them.

    Entries with no points are skipped. Points arrive flattened as
    [x0, y0, x1, y1, ...]. Config 'label_flags' patterns seed each shape's
    default flags (all False).

    Args:
        shapes (list[dict]): serialized shapes with 'label', 'points',
            'shape_type', 'content' and 'group_id' keys.
        replace (bool): forwarded to loadShapes().
    """
    built = []
    for raw in shapes:
        label = raw["label"]
        points = raw["points"]
        if not points:
            # skip point-empty shape
            continue
        # FIX: the old code also read raw["bbox"] into an unused local;
        # the value was never forwarded to Shape, so the read is dropped.
        new_shape = Shape(
            label=label,
            shape_type=raw["shape_type"],
            group_id=raw["group_id"],
            content=raw["content"],
        )
        # points are flattened pairs: (x, y) at indices (2k, 2k+1)
        for i in range(0, len(points), 2):
            new_shape.addPoint(QtCore.QPointF(points[i], points[i + 1]))
        new_shape.close()
        default_flags = {}
        if self._config["label_flags"]:
            for pattern, keys in self._config["label_flags"].items():
                if re.match(pattern, label):
                    for key in keys:
                        default_flags[key] = False
        new_shape.flags = default_flags
        built.append(new_shape)
    self.loadShapes(built, replace=replace)
def loadFlags(self, flags):
    """Populate the flag dock with checkable items from a {name: bool} dict."""
    self.flag_widget.clear()
    for key, flag in flags.items():
        item = QtWidgets.QListWidgetItem(key)
        # FIX: use the PyQt6 scoped enum (Qt.ItemFlag.ItemIsUserCheckable).
        # The unscoped Qt.ItemIsUserCheckable raises AttributeError under
        # PyQt6, which the rest of this file targets (cf. the scoped
        # Qt.CheckState / Qt.ItemDataRole usages nearby).
        item.setFlags(item.flags() | Qt.ItemFlag.ItemIsUserCheckable)
        item.setCheckState(Qt.CheckState.Checked if flag else Qt.CheckState.Unchecked)
        self.flag_widget.addItem(item)
def saveLabels(self, filename):
    """Serialize the current shapes and flags to *filename* via LabelFile.

    Creates the target directory when needed and ticks the file as
    labelled in the file-list dock on success.

    Returns:
        bool: True on success; False when LabelFile.save failed (an
        error dialog is shown in that case).
    """
    lf = LabelFile()

    def format_shape(s):
        # Merge the shape's own attributes over any extra per-shape data.
        data = s.other_data.copy()
        data.update(
            dict(
                label=s.label.encode("utf-8") if PY2 else s.label,
                # convert points into 1D array
                points=mathOps.flattener(s.points),
                bbox=s.bbox,
                group_id=s.group_id,
                content=s.content,
                shape_type=s.shape_type,
                flags=s.flags,
            )
        )
        return data

    shapes = [format_shape(item.shape()) for item in self.labelList]
    # Collect the checked state of every flag in the flag dock.
    flags = {}
    for i in range(self.flag_widget.count()):
        item = self.flag_widget.item(i)
        key = item.text()
        flag = item.checkState() == Qt.CheckState.Checked
        flags[key] = flag
    try:
        # Store the image path relative to the label file's directory.
        imagePath = osp.relpath(self.imagePath, osp.dirname(filename))
        imageData = self.imageData if self._config["store_data"] else None
        if osp.dirname(filename) and not osp.exists(osp.dirname(filename)):
            os.makedirs(osp.dirname(filename))
        lf.save(
            filename=filename,
            shapes=shapes,
            imagePath=imagePath,
            imageData=imageData,
            imageHeight=self.image.height(),
            imageWidth=self.image.width(),
            otherData=self.otherData,
            flags=flags,
        )
        self.labelFile = lf
        items = self.fileListWidget.findItems(
            self.imagePath, Qt.MatchFlag.MatchExactly
        )
        if len(items) > 0:
            if len(items) != 1:
                raise RuntimeError("There are duplicate files.")
            # Tick the file as labelled in the file-list dock.
            items[0].setCheckState(Qt.CheckState.Checked)
        # disable allows next and previous image to proceed
        return True
    except LabelFileError as e:
        self.errorMessage(
            self.tr("Error saving label data"), self.tr("<b>%s</b>") % e
        )
        return False
def copySelectedShape(self):
    """Duplicate the selected canvas shapes and register their labels."""
    duplicated = self.canvas.copySelectedShapes()
    self.labelList.clearSelection()
    for s in duplicated:
        self.addLabel(s)
    self.setDirty()
def labelSelectionChanged(self):
    """Mirror the list-widget selection onto the canvas (edit mode only)."""
    if self._noSelectionSlot:
        return
    if not self.canvas.editing():
        return
    selected = [entry.shape() for entry in self.labelList.selectedItems()]
    if selected:
        self.canvas.selectShapes(selected)
    else:
        self.canvas.deSelectShape()
def labelItemChanged(self, item):
    """Toggle a shape's visibility when its list item is (un)checked."""
    visible = item.checkState() == Qt.CheckState.Checked
    self.canvas.setShapeVisible(item.shape(), visible)
def labelOrderChanged(self):
    """Re-sync canvas shape order after the label list has been reordered."""
    self.setDirty()
    reordered = [entry.shape() for entry in self.labelList]
    self.canvas.loadShapes(reordered)
# Callback functions:
def newShape(self):
    """Pop-up and give focus to the label editor.

    position MUST be in global coordinates.
    Handles three flows: plain image mode, video mode (asks for a tracking
    id), and SAM mode (updates the pending SAM shape instead of the canvas).
    """
    items = self.uniqLabelList.selectedItems()
    text = None
    if items:
        text = items[0].data(Qt.ItemDataRole.UserRole)
    flags = {}
    group_id = None
    if self._config["display_label_popup"] or not text:
        previous_text = self.labelDialog.edit.text()
        text, flags, group_id, content = self.labelDialog.popUp(text)
        if not text:
            # restore the editor text when the dialog was cancelled/emptied
            self.labelDialog.edit.setText(previous_text)
    if text and not self.validateLabel(text):
        self.errorMessage(
            self.tr("Invalid label"),
            self.tr("Invalid label '{}' with validation type '{}'").format(
                text, self._config["validate_label"]
            ),
        )
        # an invalid label is treated the same as no label at all
        text = ""
    if text == "SAM instance":
        text = "SAM instance - confirmed"
    if self.current_annotation_mode == "video":
        # video mode: let the user assign/confirm a tracking id
        group_id, text = getIDfromUser_UI.PopUp(self, group_id, text)
    if text:
        if group_id is None:
            # auto-assign a fresh (negative, decreasing) id
            group_id = self.minID
            self.minID -= 1
        else:
            # keep minID strictly below every user-chosen id
            self.minID = min(self.minID, group_id - 1)
        if self.canvas.SAM_mode == "finished":
            # SAM flow: label the pending SAM shape, not the canvas polygon
            self.current_sam_shape["label"] = text
            self.current_sam_shape["group_id"] = group_id
        else:
            self.labelList.clearSelection()
            # shape below is of type qt shape
            shape = self.canvas.setLastLabel(text, flags)
            shape.group_id = group_id
            shape.content = content
            self.addLabel(shape)
            self.rec_frame_for_id(group_id, self.INDEX_OF_CURRENT_FRAME)
        self.actions.editMode.setEnabled(True)
        self.actions.undoLastPoint.setEnabled(False)
        self.actions.undo.setEnabled(True)
        self.setDirty()
        self.refresh_image_MODE()
    else:
        if self.canvas.SAM_mode == "finished":
            self.current_sam_shape["label"] = text
            self.current_sam_shape["group_id"] = -1
            self.canvas.SAM_mode = ""
        else:
            # discard the just-drawn polygon
            self.canvas.undoLastLine()
            self.canvas.shapesBackups.pop()
    if self.current_annotation_mode == "video":
        # NOTE(review): called twice back-to-back — presumably a deliberate
        # double-refresh of the frame annotations; confirm before changing
        self.update_current_frame_annotation_button_clicked()
        self.update_current_frame_annotation_button_clicked()
def scrollRequest(self, delta, orientation):
    """Scroll the view; the sign flip gives "natural" scroll direction."""
    steps = -delta * 0.1
    bar = self.scrollBars[orientation]
    self.setScroll(orientation, bar.value() + bar.singleStep() * steps)
def setScroll(self, orientation, value):
    """Apply a scrollbar value and remember it for the current file."""
    bar = self.scrollBars[orientation]
    bar.setValue(value)
    per_file = self.scroll_values[orientation]
    per_file[self.filename] = value
def setZoom(self, value):
    """Switch to manual zoom mode at the given zoom percentage."""
    self.actions.fitWindow.setChecked(False)
    self.actions.fitWidth.setChecked(False)
    self.zoomMode = self.MANUAL_ZOOM
    self.zoomWidget.setValue(value)
    # remember the (mode, value) pair per file so it can be restored later
    self.zoom_values[self.filename] = (self.zoomMode, value)
def addZoom(self, increment=1.1):
    """Multiply the current zoom by *increment*, rounding away from 1."""
    scaled = self.zoomWidget.value() * increment
    # round up when zooming in, down when zooming out
    scaled = math.ceil(scaled) if increment > 1 else math.floor(scaled)
    self.setZoom(scaled)
def zoomRequest(self, delta, pos):
    """Zoom around *pos*, compensating scrollbars so the point stays put."""
    old_width = self.canvas.width()
    factor = 0.9 if delta < 0 else 1.1
    self.addZoom(factor)
    new_width = self.canvas.width()
    if new_width == old_width:
        return
    # shift the scrollbars so the cursor keeps pointing at the same pixel
    scale = new_width / old_width
    x_shift = round(pos.x() * scale) - pos.x()
    y_shift = round(pos.y() * scale) - pos.y()
    horizontal = Qt.Orientation.Horizontal
    vertical = Qt.Orientation.Vertical
    self.setScroll(horizontal, self.scrollBars[horizontal].value() + x_shift)
    self.setScroll(vertical, self.scrollBars[vertical].value() + y_shift)
def setFitWindow(self, value=True):
    """Enter (or leave) fit-to-window zoom mode."""
    if value:
        # fit-window and fit-width are mutually exclusive
        self.actions.fitWidth.setChecked(False)
    self.zoomMode = self.FIT_WINDOW if value else self.MANUAL_ZOOM
    self.adjustScale()
def setFitWidth(self, value=True):
    """Enter (or leave) fit-to-width zoom mode."""
    if value:
        # fit-width and fit-window are mutually exclusive
        self.actions.fitWindow.setChecked(False)
    self.zoomMode = self.FIT_WIDTH if value else self.MANUAL_ZOOM
    self.adjustScale()
def onNewBrightnessContrast(self, qimage):
    """Refresh the canvas pixmap after a brightness/contrast change."""
    pixmap = QtGui.QPixmap.fromImage(qimage)
    # keep the existing shapes; only the underlying image changed
    self.canvas.loadPixmap(pixmap, clear_shapes=False)
def enable_show_cross_line(self, enabled):
    """Persist the cross-line overlay setting and apply it everywhere."""
    self.actions.show_cross_line.setChecked(enabled)
    self.canvas.set_show_cross_line(enabled)
    self._config["show_cross_line"] = enabled
def brightnessContrast(self, value):
    """Open the brightness/contrast dialog and remember the chosen values."""
    dialog = BrightnessContrastDialog(
        utils.img_data_to_pil(self.imageData),
        self.onNewBrightnessContrast,
        parent=self,
    )
    # seed the sliders with the values previously used for this file
    brightness, contrast = self.brightnessContrast_values.get(
        self.filename, (None, None)
    )
    if brightness is not None:
        dialog.slider_brightness.setValue(brightness)
    if contrast is not None:
        dialog.slider_contrast.setValue(contrast)
    dialog.exec()
    # persist whatever the user settled on
    self.brightnessContrast_values[self.filename] = (
        dialog.slider_brightness.value(),
        dialog.slider_contrast.value(),
    )
def togglePolygons(self, value):
    """Check or uncheck every label item, showing/hiding all polygons."""
    state = Qt.CheckState.Checked if value else Qt.CheckState.Unchecked
    for entry in self.labelList:
        entry.setCheckState(state)
def loadFile(self, filename=None):
    """Load the specified file, or the last opened file if None.

    Returns True on success, False on any error (missing file, bad label
    file, unreadable image); returns None when the call is delegated to the
    file-list widget. Side effects: resets state, loads the image into the
    canvas, restores per-file zoom/scroll/brightness settings and any
    existing label file.
    """
    # changing fileListWidget loads file
    if filename in self.imageList and (
        self.fileListWidget.currentRow() != self.imageList.index(filename)
    ):
        self.fileListWidget.setCurrentRow(self.imageList.index(filename))
        self.fileListWidget.repaint()
        return
    self.resetState()
    self.canvas.setEnabled(False)
    if filename is None:
        filename = self.settings.value("filename", "")
    filename = str(filename)
    if not QtCore.QFile.exists(filename):
        # BUGFIX: the f-string previously had no placeholder, so the
        # offending path was never printed
        print(f"File {filename} does not exist")
        self.errorMessage(
            self.tr("Error opening file"),
            self.tr("No such file: <b>%s</b>") % filename,
        )
        return False
    # assumes same name, but json extension
    self.status(self.tr("Loading %s...") % osp.basename(str(filename)))
    label_file = osp.splitext(filename)[0] + ".json"
    if self.output_dir:
        label_file_without_path = osp.basename(label_file)
        label_file = osp.join(self.output_dir, label_file_without_path)
    if QtCore.QFile.exists(label_file) and LabelFile.is_label_file(
        label_file
    ):
        try:
            self.labelFile = LabelFile(label_file)
        except LabelFileError as e:
            self.errorMessage(
                self.tr("Error opening file"),
                self.tr(
                    "<p><b>%s</b></p>"
                    "<p>Make sure <i>%s</i> is a valid label file."
                )
                % (e, label_file),
            )
            self.status(self.tr("Error reading %s") % label_file)
            return False
        self.imageData = self.labelFile.imageData
        # image path stored in the label file is relative to its directory
        self.imagePath = osp.join(
            osp.dirname(label_file),
            self.labelFile.imagePath,
        )
        self.otherData = self.labelFile.otherData
    else:
        self.imageData = LabelFile.load_image_file(filename)
        if self.imageData:
            self.imagePath = filename
        self.labelFile = None
    image = QtGui.QImage.fromData(self.imageData)
    if image.isNull():
        formats = [
            "*.{}".format(fmt.data().decode())
            for fmt in QtGui.QImageReader.supportedImageFormats()
        ]
        self.errorMessage(
            self.tr("Error opening file"),
            self.tr(
                "<p>Make sure <i>{0}</i> is a valid image file.<br/>"
                "Supported image formats: {1}</p>"
            ).format(filename, ",".join(formats)),
        )
        self.status(self.tr("Error reading %s") % filename)
        return False
    self.image = image
    self.CURRENT_FRAME_IMAGE = cv2.imread(filename)
    self.filename = filename
    if self._config["keep_prev"]:
        prev_shapes = self.canvas.shapes
    self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
    flags = {k: False for k in self._config["flags"] or []}
    if self.labelFile:
        self.actions.export.setEnabled(True)
        self.CURRENT_SHAPES_IN_IMG = self.labelFile.shapes
        self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
        self.loadLabels(self.labelFile.shapes)
        if self.labelFile.flags is not None:
            flags.update(self.labelFile.flags)
    self.loadFlags(flags)
    if self._config["keep_prev"] and self.noShapes():
        self.loadShapes(prev_shapes, replace=False)
        self.setDirty()
    else:
        self.setClean()
    self.canvas.setEnabled(True)
    # set zoom values
    is_initial_load = not self.zoom_values
    if self.filename in self.zoom_values:
        self.zoomMode = self.zoom_values[self.filename][0]
        self.setZoom(self.zoom_values[self.filename][1])
    elif is_initial_load or not self._config["keep_prev_scale"]:
        self.adjustScale(initial=True)
    # set scroll values
    for orientation in self.scroll_values:
        if self.filename in self.scroll_values[orientation]:
            self.setScroll(
                orientation, self.scroll_values[orientation][self.filename]
            )
    # after loading the image, clear SAM instance if exists
    if self.sam_predictor is not None:
        self.sam_predictor.clear_logit()
        self.canvas.SAM_coordinates = []
    # set brightness contrast values
    dialog = BrightnessContrastDialog(
        utils.img_data_to_pil(self.imageData),
        self.onNewBrightnessContrast,
        parent=self,
    )
    brightness, contrast = self.brightnessContrast_values.get(
        self.filename, (None, None)
    )
    if self._config["keep_prev_brightness"] and self.recentFiles:
        brightness, _ = self.brightnessContrast_values.get(
            self.recentFiles[0], (None, None)
        )
    if self._config["keep_prev_contrast"] and self.recentFiles:
        _, contrast = self.brightnessContrast_values.get(
            self.recentFiles[0], (None, None)
        )
    if brightness is not None:
        dialog.slider_brightness.setValue(brightness)
    if contrast is not None:
        dialog.slider_contrast.setValue(contrast)
    self.brightnessContrast_values[self.filename] = (brightness, contrast)
    if brightness is not None or contrast is not None:
        dialog.onNewValue(None)
    self.paintCanvas()
    self.addRecentFile(self.filename)
    self.toggleActions(True)
    self.canvas.setFocus()
    self.status(self.tr("Loaded %s") % osp.basename(str(filename)))
    return True
def resizeEvent(self, event):
    """Keep the auto-fit zoom modes in sync when the window is resized."""
    needs_rescale = (
        self.canvas
        and not self.image.isNull()
        and self.zoomMode != self.MANUAL_ZOOM
    )
    if needs_rescale:
        self.adjustScale()
    super(MainWindow, self).resizeEvent(event)
def paintCanvas(self):
    """Repaint the canvas at the scale selected in the zoom widget."""
    assert not self.image.isNull(), "cannot paint null image"
    # the zoom widget holds a percentage; the canvas wants a plain factor
    self.canvas.scale = 0.01 * self.zoomWidget.value()
    self.canvas.adjustSize()
    self.canvas.update()
def adjustScale(self, initial=False):
    """Recompute the zoom percentage from the active fit mode."""
    # on the initial load always start from fit-window
    mode = self.FIT_WINDOW if initial else self.zoomMode
    percent = int(100 * self.scalers[mode]())
    self.zoomWidget.setValue(percent)
    self.zoom_values[self.filename] = (self.zoomMode, percent)
def scaleFitWindow(self):
    """Figure out the size of the pixmap to fit the main widget."""
    margin = 2.0  # so that no scrollbars are generated
    view_w = self.centralWidget().width() - margin
    view_h = self.centralWidget().height() - margin
    view_aspect = view_w / view_h
    pix_w = self.canvas.pixmap.width() - 0.0
    pix_h = self.canvas.pixmap.height() - 0.0
    pix_aspect = pix_w / pix_h
    # the dimension with the larger relative size binds the scale
    if pix_aspect >= view_aspect:
        return view_w / pix_w
    return view_h / pix_h
def scaleFitWidth(self):
    """Return the zoom factor that fits the pixmap to the widget width."""
    # The epsilon does not seem to work too well here.
    available = self.centralWidget().width() - 2.0
    return available / self.canvas.pixmap.width()
def enableSaveImageWithData(self, enabled):
    """Toggle whether raw image bytes are embedded in saved label files."""
    self.actions.saveWithImageData.setChecked(enabled)
    self._config["store_data"] = enabled
def closeEvent(self, event):
    """Persist window geometry and session info unless the user cancels."""
    if not self.mayContinue():
        event.ignore()
        return
    self.Escape_clicked()
    settings = self.settings
    settings.setValue("filename", self.filename if self.filename else "")
    settings.setValue("window/size", self.size())
    settings.setValue("window/position", self.pos())
    settings.setValue("window/state", self.saveState())
    settings.setValue("recentFiles", self.recentFiles)
    # ask the user for where to save the labels
    # self.settings.setValue('window/geometry', self.saveGeometry())
def dragEnterEvent(self, event):
    """Accept drags only when they carry at least one supported image file."""
    supported = tuple(
        ".%s" % fmt.data().decode().lower()
        for fmt in QtGui.QImageReader.supportedImageFormats()
    )
    if event.mimeData().hasUrls():
        paths = [url.toLocalFile() for url in event.mimeData().urls()]
        if any(path.lower().endswith(supported) for path in paths):
            event.accept()
    else:
        event.ignore()
def dropEvent(self, event):
    """Import dropped image files, unless unsaved changes block it."""
    if not self.mayContinue():
        event.ignore()
        return
    dropped = [url.toLocalFile() for url in event.mimeData().urls()]
    self.importDroppedImageFiles(dropped)
# User Dialogs #
def loadRecent(self, filename):
    """Open a file from the recent-files menu if it is safe to leave."""
    if not self.mayContinue():
        return
    self.loadFile(filename)
def change_curr_model(self, model_name):
    """
    Summary:
        Change current model to the model_name

    Args:
        model_name (str): name of the model to be changed to
    """
    self.multi_model_flag = False
    # show the busy overlay while the (potentially slow) model load runs
    self.waitWindow(
        visible=True, text=f'Please Wait.\n{model_name} is being Loaded...')
    helper = self.intelligenceHelper
    helper.current_model_name, helper.current_mm_model = helper.make_mm_model(
        model_name)
    self.waitWindow()
def model_explorer(self):
    """
    Summary:
        Open model explorer dialog to select or download models
    """
    self._config = get_config()
    model_explorer_dialog = utils.ModelExplorerDialog(
        self, self._config["mute"], notification.PopUp)
    # make it fit its contents; Qt setMinimumWidth/Height require int,
    # and table.width() * 1.5 is a float — truncate explicitly
    model_explorer_dialog.adjustSize()
    model_explorer_dialog.setMinimumWidth(
        int(model_explorer_dialog.table.width() * 1.5))
    model_explorer_dialog.setMinimumHeight(
        model_explorer_dialog.table.rowHeight(0) * 10)
    model_explorer_dialog.exec()
    # init intelligence again if it's the first model
    if self.helper_first_time_flag:
        try:
            self.intelligenceHelper = Intelligence(self)
        except Exception:
            # best effort: warn and keep the first-time flag so we retry later
            print(
                "it seems you have a problem with initializing model\ncheck you have at least one model")
            self.helper_first_time_flag = True
        else:
            self.helper_first_time_flag = False
    mathOps.update_saved_models_json(os.getcwd())
    selected_model_name, config, checkpoint = model_explorer_dialog.selected_model
    # -1 means the dialog was closed without picking a model
    if selected_model_name != -1:
        self.intelligenceHelper.current_model_name, self.intelligenceHelper.current_mm_model = self.intelligenceHelper.make_mm_model_more(
            selected_model_name, config, checkpoint)
    self.updateSamControls()
def openNextImg(self, _value=False, load=True):
    """Advance to the next image in the list (or stay on the last one)."""
    self.refresh_image_MODE()
    keep_prev = self._config["keep_prev"]
    if not self.mayContinue():
        return
    images = self.imageList
    if len(images) <= 0:
        return
    if self.filename is None:
        next_file = images[0]
    else:
        idx = images.index(self.filename)
        # clamp to the last image when already at the end
        next_file = images[idx + 1] if idx + 1 < len(images) else images[-1]
    self.filename = next_file
    if self.filename and load:
        self.loadFile(self.filename)
    self._config["keep_prev"] = keep_prev
    self.refresh_image_MODE()
def openFile(self, _value=False):
    """Show a file dialog and open the chosen image or label file."""
    self.actions.export.setEnabled(False)
    # best effort: close a leftover OpenCV preview window if one exists
    # (narrowed from a bare except so Ctrl-C / SystemExit still propagate)
    try:
        cv2.destroyWindow('video processing')
    except Exception:
        pass
    if not self.mayContinue():
        return
    path = osp.dirname(str(self.filename)) if self.filename else "."
    formats = [
        "*.{}".format(fmt.data().decode())
        for fmt in QtGui.QImageReader.supportedImageFormats()
    ]
    filters = self.tr("Image & Label files (%s)") % " ".join(
        formats + ["*%s" % LabelFile.suffix]
    )
    filename = QtWidgets.QFileDialog.getOpenFileName(
        self,
        self.tr("%s - Choose Image or Label file") % __appname__,
        path,
        filters,
    )
    # getOpenFileName returns (path, selected_filter)
    filename, _ = filename
    filename = str(filename)
    if filename:
        self.reset_for_new_mode("img")
        self.loadFile(filename)
        self.refresh_image_MODE()
        self.set_video_controls_visibility(False)
        self.filename = filename
        # clear the file list widget
        self.fileListWidget.clear()
        self.uniqLabelList.clear()
        # enable Visualization Options (tracking-only options stay disabled)
        for option in self.vis_options:
            if option in [self.id_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit]:
                option.setEnabled(False)
            else:
                option.setEnabled(True)
def changeOutputDirDialog(self, _value=False):
    """Let the user pick the directory where annotations are saved/loaded."""
    default_output_dir = self.output_dir
    if default_output_dir is None and self.filename:
        default_output_dir = osp.dirname(self.filename)
    if default_output_dir is None:
        default_output_dir = self.currentPath()
    chosen = str(
        QtWidgets.QFileDialog.getExistingDirectory(
            self,
            self.tr("%s - Save/Load Annotations in Directory") % __appname__,
            default_output_dir,
            QtWidgets.QFileDialog.Option.ShowDirsOnly
            | QtWidgets.QFileDialog.Option.DontResolveSymlinks,
        )
    )
    if not chosen:
        return
    self.output_dir = chosen
    self.statusBar().showMessage(
        self.tr("%s . Annotations will be saved/loaded in %s")
        % ("Change Annotations Dir", self.output_dir)
    )
    self.statusBar().show()
    current_filename = self.filename
    # reload the directory so check marks reflect the new annotation dir
    self.importDirImages(self.lastOpenDir, load=False)
    if current_filename in self.imageList:
        # retain currently selected file
        self.fileListWidget.setCurrentRow(
            self.imageList.index(current_filename)
        )
        self.fileListWidget.repaint()
def saveFile(self, _value=False):
    """Save the labels, choosing the destination according to the session."""
    assert not self.image.isNull(), "cannot save empty image"
    if self.labelFile:
        # a label file is already loaded: overwrite it in place
        self.save_path = self.labelFile.filename
        self._saveFile(self.save_path)
    elif self.output_file:
        # an output file was given up-front: save there and close
        self.save_path = self.output_file
        self._saveFile(self.save_path)
        self.close()
    else:
        # otherwise ask the user where to save
        self.save_path = self.saveFileDialog()
        self._saveFile(self.save_path)
    if self.save_path is not None and self.save_path != "":
        self.actions.export.setEnabled(True)
def exportData(self):
    """
    Export data to COCO, MOT, video, and custom exports, depending on the current annotation mode.

    If the current annotation mode is "video", the function prompts the user to select which types of exports to perform
    (COCO, MOT, video, and/or custom exports), and then prompts the user to select the output file path for each export type
    that was selected. The function then exports the data to the selected file paths.

    If the current annotation mode is "img" or "dir", the function prompts the user to select the output file path for a COCO
    export, and then exports the data to the selected file path.

    If an error occurs during the export process, the function displays an error message. Otherwise, the function displays
    a success message.
    """
    try:
        if self.current_annotation_mode == "video":
            # Get user input for export options
            result, coco_radio, mot_radio, video_radio, custom_exports_radio_checked_list = exportData_UI.PopUp()
            if not result:
                return
            # tracking results written during video annotation live here
            json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
            pth = ""
            # Check which radio button is checked and export accordingly
            if video_radio:
                # Get user input for video export path
                folderDialog = utils.FolderDialog(
                    "tracking_results.mp4", "mp4")
                if folderDialog.exec():
                    pth = self.export_as_video_button_clicked(
                        folderDialog.selectedFiles()[0])
                else:
                    return
            if coco_radio:
                # Get user input for COCO export path
                folderDialog = utils.FolderDialog("coco.json", "json")
                if folderDialog.exec():
                    pth = utils.exportCOCOvid(
                        json_file_name, self.CURRENT_VIDEO_WIDTH, self.CURRENT_VIDEO_HEIGHT, folderDialog.selectedFiles()[0])
                else:
                    return
            if mot_radio:
                # Get user input for MOT export path
                folderDialog = utils.FolderDialog("mot.txt", "txt")
                if folderDialog.exec():
                    pth = utils.exportMOT(
                        json_file_name, folderDialog.selectedFiles()[0])
                else:
                    return
            # custom exports
            custom_exports_list_video = [
                custom_export for custom_export in custom_exports_list if custom_export.mode == "video"]
            if len(custom_exports_radio_checked_list) != 0:
                for i in range(len(custom_exports_radio_checked_list)):
                    if custom_exports_radio_checked_list[i]:
                        # Get user input for custom export path
                        folderDialog = utils.FolderDialog(
                            f"{custom_exports_list_video[i].file_name}.{custom_exports_list_video[i].format}", custom_exports_list_video[i].format)
                        if folderDialog.exec():
                            try:
                                pth = custom_exports_list_video[i](
                                    json_file_name, self.CURRENT_VIDEO_WIDTH, self.CURRENT_VIDEO_HEIGHT, folderDialog.selectedFiles()[0])
                            except Exception as e:
                                MsgBox.OKmsgBox(
                                    f"Error", f"Error: with custom export {custom_exports_list_video[i].button_name}\n check the parameters matches the specified ones in custom_exports.py\n Error Message: {e}", "critical")
                        else:
                            return
        # Image and Directory modes
        elif self.current_annotation_mode == "img" or self.current_annotation_mode == "dir":
            result, coco_radio, custom_exports_radio_checked_list = exportData_UI.PopUp(
                mode="image")
            if not result:
                return
            save_path = self.save_path if self.save_path else self.labelFile.filename
            json_paths = utils.parse_img_export(self.target_directory, save_path)
            # Check which radio button is checked and export accordingly
            # COCO export
            if coco_radio:
                # Get user input for COCO export path
                folderDialog = utils.FolderDialog("coco.json", "json")
                if folderDialog.exec():
                    pth = utils.exportCOCO(
                        json_paths, folderDialog.selectedFiles()[0])
                else:
                    return
            # custom exports
            custom_exports_list_image = [
                custom_export for custom_export in custom_exports_list if custom_export.mode == "image"]
            if len(custom_exports_radio_checked_list) != 0:
                for i in range(len(custom_exports_radio_checked_list)):
                    if custom_exports_radio_checked_list[i]:
                        # Get user input for custom export path
                        folderDialog = utils.FolderDialog(
                            f"{custom_exports_list_image[i].file_name}.{custom_exports_list_image[i].format}", custom_exports_list_image[i].format)
                        if folderDialog.exec():
                            try:
                                pth = custom_exports_list_image[i](
                                    json_paths, folderDialog.selectedFiles()[0])
                            except Exception as e:
                                MsgBox.OKmsgBox(
                                    f"Error", f"Error: with custom export {custom_exports_list_image[i].button_name}\n check the parameters matches the specified ones in custom_exports.py\n Error Message: {e}", "critical")
                        else:
                            return
    except Exception as e:
        # Error QMessageBox
        msg = QtWidgets.QMessageBox()
        msg.setIcon(QtWidgets.QMessageBox.Icon.Critical)
        msg.setText(f"Error\n {e}")
        msg.setWindowTitle(
            "Export Error")
        # print exception and error line to terminal
        print(e)
        msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)
        msg.exec()
        return
    else:
        # display QMessageBox with ok button and label "Exporting COCO"
        msg = QtWidgets.QMessageBox()
        try:
            # pth may be unbound when no export branch ran; the inner
            # try/except below treats that as a failed export
            if pth not in ["", None, False]:
                msg.setIcon(QtWidgets.QMessageBox.Icon.Information)
                msg.setText(f"Annotations exported successfully to {pth}")
                msg.setWindowTitle("Export Success")
            else:
                msg.setIcon(QtWidgets.QMessageBox.Icon.Critical)
                msg.setText(f"Export Failed")
                msg.setWindowTitle("Export Failed")
        except:
            msg.setIcon(QtWidgets.QMessageBox.Icon.Critical)
            msg.setText(f"Export Failed")
            msg.setWindowTitle("Export Failed")
        msg.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)
        msg.exec()
def saveFileAs(self, _value=False):
    """Always prompt for a destination, then save the labels there."""
    self.actions.export.setEnabled(True)
    assert not self.image.isNull(), "cannot save empty image"
    chosen = self.saveFileDialog()
    self.save_path = chosen
    self._saveFile(chosen)
def saveFileDialog(self):
    """Run the save dialog and return the chosen label-file path ('' if none)."""
    caption = self.tr("%s - Choose File") % __appname__
    filters = self.tr("Label files (*%s)") % LabelFile.suffix
    # prefer the configured output directory; fall back to the current path
    start_dir = self.output_dir if self.output_dir else self.currentPath()
    dlg = QtWidgets.QFileDialog(self, caption, start_dir, filters)
    dlg.setDefaultSuffix(LabelFile.suffix[1:])
    dlg.setAcceptMode(QtWidgets.QFileDialog.AcceptMode.AcceptSave)
    dlg.setOption(QtWidgets.QFileDialog.Option.DontConfirmOverwrite, False)
    dlg.setOption(QtWidgets.QFileDialog.Option.DontUseNativeDialog, False)
    basename = osp.basename(osp.splitext(self.filename)[0])
    default_labelfile_name = osp.join(start_dir, basename + LabelFile.suffix)
    filename = dlg.getSaveFileName(
        self,
        self.tr("Choose File"),
        default_labelfile_name,
        self.tr("Label files (*%s)") % LabelFile.suffix,
    )
    # getSaveFileName returns (path, selected_filter)
    if isinstance(filename, tuple):
        filename, _ = filename
    return filename
def _saveFile(self, filename):
if filename and self.saveLabels(filename):
self.addRecentFile(filename)
self.setClean()
def closeFile(self, _value=False):
    """Close the current file and return the UI to its idle state.

    Aborts (keeping the file open) when the user cancels the save prompt.
    """
    if not self.mayContinue():
        return
    self.resetState()
    self.setClean()
    self.toggleActions(False)
    self.canvas.setEnabled(False)
    self.actions.saveAs.setEnabled(False)
    # clear the file list widget
    self.fileListWidget.clear()
    self.uniqLabelList.clear()
    self.current_annotation_mode = ""
    self.right_click_menu()
    # no file open -> no visualization options make sense
    for option in self.vis_options:
        option.setEnabled(False)
def getLabelFile(self):
    """Return the label-file path paired with the current file."""
    current = self.filename
    if current.lower().endswith(".json"):
        return current
    return osp.splitext(current)[0] + ".json"
def deleteFile(self):
    """Delete the current label file from disk after user confirmation."""
    mb = QtWidgets.QMessageBox
    msg = self.tr(
        "You are about to permanently delete this label file, "
        "proceed anyway?"
    )
    answer = mb.warning(self, self.tr("Attention"), msg, mb.StandardButton.Yes | mb.StandardButton.No)
    if answer != mb.StandardButton.Yes:
        return
    target = self.getLabelFile()
    if not osp.exists(target):
        return
    os.remove(target)
    logger.info("Label file is removed: {}".format(target))
    # uncheck the entry so the list no longer shows it as annotated
    self.fileListWidget.currentItem().setCheckState(Qt.CheckState.Unchecked)
    self.resetState()
# Message Dialogs. #
def hasLabels(self):
    """Return True if at least one shape is labeled; warn otherwise."""
    if not self.noShapes():
        return True
    self.errorMessage(
        "No objects labeled",
        "You must label at least one object to save the file.",
    )
    return False
def hasLabelFile(self):
    """Return True when a label file already exists for the current file."""
    if self.filename is None:
        return False
    return osp.exists(self.getLabelFile())
def mayContinue(self):
    """Ask to save pending changes; return False only when the user cancels."""
    if not self.dirty:
        return True
    mb = QtWidgets.QMessageBox
    msg = self.tr('Save annotations to "{}" before closing?').format(
        self.filename
    )
    answer = mb.question(
        self,
        self.tr("Save annotations?"),
        msg,
        mb.StandardButton.Save | mb.StandardButton.Discard | mb.StandardButton.Cancel,
        mb.StandardButton.Save,
    )
    if answer == mb.StandardButton.Discard:
        return True
    if answer == mb.StandardButton.Save:
        self.saveFile()
        return True
    # Cancel (or dialog dismissed): stay on the current file
    return False
def errorMessage(self, title, message):
    """Build a critical message box with an OK button and return it.

    NOTE(review): the box is returned, not exec'd — it is never shown
    here; confirm whether callers are expected to display it.
    """
    box = QtWidgets.QMessageBox(QtWidgets.QMessageBox.Icon.Critical, title, message)
    box.setStandardButtons(QtWidgets.QMessageBox.StandardButton.Ok)
    return box
def currentPath(self):
    """Directory of the current file, or '.' when nothing is open."""
    if self.filename:
        return osp.dirname(str(self.filename))
    return "."
def toggleKeepPrevMode(self):
    """Flip the 'keep previous annotations' configuration flag."""
    current = self._config["keep_prev"]
    self._config["keep_prev"] = not current
def removeSelectedPoint(self):
    """Remove the highlighted point; drop the shape if it becomes empty."""
    self.canvas.removeSelectedPoint()
    hovered = self.canvas.hShape
    if not hovered.points:
        # a point-less shape carries no geometry any more
        self.canvas.deleteShape(hovered)
        self.remLabels([hovered])
        self.setDirty()
    if self.noShapes():
        for action in self.actions.onShapesPresent:
            action.setEnabled(False)
def deleteSelectedShape(self):
    """Delete the selected shapes after confirmation.

    In image/dir mode this only removes them from the current image; in
    video mode the user is additionally asked over which frame range the
    shapes' ids should be deleted.
    """
    try:
        if len(self.canvas.selectedShapes) == 0:
            return
        yes, no = QtWidgets.QMessageBox.StandardButton.Yes, QtWidgets.QMessageBox.StandardButton.No
        msg = self.tr(
            "You are about to permanently delete {} polygons, "
            "proceed anyway?"
        ).format(len(self.canvas.selectedShapes))
        if yes == QtWidgets.QMessageBox.warning(
            self, self.tr("Attention"), msg, yes | no, yes
        ):
            deleted_shapes = self.canvas.deleteSelected()
            # keep the tracking ids so video mode can purge other frames too
            deleted_ids = [shape.group_id for shape in deleted_shapes]
            self.remLabels(deleted_shapes)
            self.setDirty()
            if self.noShapes():
                for action in self.actions.onShapesPresent:
                    action.setEnabled(False)
            if self.current_annotation_mode == 'img' or self.current_annotation_mode == 'dir':
                self.refresh_image_MODE()
                return
            # if video mode
            result, self.featuresOptions, fromFrameVAL, toFrameVAL = deleteSelectedShape_UI.PopUp(
                self.TOTAL_VIDEO_FRAMES,
                self.INDEX_OF_CURRENT_FRAME,
                self.featuresOptions)
            if result == QtWidgets.QDialog.DialogCode.Accepted:
                for deleted_id in deleted_ids:
                    self.delete_ids_from_all_frames(
                        [deleted_id], from_frame=fromFrameVAL, to_frame=toFrameVAL)
                self.main_video_frames_slider_changed()
    except Exception as e:
        MsgBox.OKmsgBox(f"Error", f"Error: {e}", "critical")
def delete_ids_from_all_frames(self, deleted_ids, from_frame, to_frame):
    """
    Summary:
        Delete ids from a range of frames

    Args:
        deleted_ids (list): list of ids to be deleted
        from_frame (int): starting frame
        to_frame (int): ending frame
    """
    # accept the range in either order
    from_frame, to_frame = np.min(
        [from_frame, to_frame]), np.max([from_frame, to_frame])
    listObj = self.load_objects_from_json__orjson()
    for i in range(from_frame - 1, to_frame, 1):
        frame_idx = listObj[i]['frame_idx']
        # BUGFIX: iterate over a copy — removing from the list while
        # iterating it used to skip the element after each removal
        for object_ in list(listObj[i]['frame_data']):
            id = object_['tracker_id']
            if id in deleted_ids:
                listObj[i]['frame_data'].remove(object_)
                # (-1, -1) marks the trajectory point as deleted
                self.CURRENT_ANNOATAION_TRAJECTORIES['id_' +
                                                     str(id)][frame_idx - 1] = (-1, -1)
                self.rec_frame_for_id(id, frame_idx, type_='remove')
    self.load_objects_to_json__orjson(listObj)
def copyShape(self):
    """
    Summary:
        Copy selected shape in right click menu.
        is NOT saved in the clipboard

    In video mode a single copied shape is relabeled and gets a new
    tracking id; multiple shapes are duplicated as-is and the frame
    annotations are refreshed.
    """
    if len(self.canvas.selectedShapes) > 1 and self.current_annotation_mode == 'video':
        # multi-shape copy in video mode: duplicate on the canvas, then
        # undo the synthetic last line and restore the original shape list
        org = copy.deepcopy(self.canvas.shapes)
        self.canvas.endMove(copy=True)
        self.canvas.undoLastLine()
        self.canvas.shapesBackups.pop()
        self.canvas.shapes = org
        self.update_current_frame_annotation_button_clicked()
        return
    elif self.current_annotation_mode == 'video':
        # single-shape copy in video mode: ask for a new label and id
        self.canvas.endMove(copy=True)
        shape = self.canvas.selectedShapes[0]
        text = shape.label
        text, flags, group_id, content = self.labelDialog.popUp(text)
        shape.group_id = -1
        shape.content = content
        shape.label = text
        shape.flags = flags
        group_id, text = getIDfromUser_UI.PopUp(self, group_id, text)
        if text:
            self.labelList.clearSelection()
            shape = self.canvas.setLastLabel(text, flags)
            shape.group_id = group_id
            self.addLabel(shape)
            self.rec_frame_for_id(
                shape.group_id, self.INDEX_OF_CURRENT_FRAME)
            self.actions.editMode.setEnabled(True)
            self.actions.undoLastPoint.setEnabled(False)
            self.actions.undo.setEnabled(True)
            self.setDirty()
        else:
            # dialog cancelled: discard the duplicate
            self.canvas.undoLastLine()
            self.canvas.shapesBackups.pop()
        self.update_current_frame_annotation_button_clicked()
        return
    # image/dir mode: plain duplication of all selected shapes
    self.canvas.endMove(copy=True)
    for shape in self.canvas.selectedShapes:
        self.addLabel(shape)
    self.labelList.clearSelection()
    self.setDirty()
def moveShape(self):
    """Commit a shape move; refresh video-mode bookkeeping if needed."""
    self.canvas.endMove(copy=False)
    self.setDirty()
    if self.current_annotation_mode == 'video':
        self.update_current_frame_annotation_button_clicked()
def openDirDialog(self, _value=False, dirpath=None):
    """Prompt for a directory of images and import it."""
    if not self.mayContinue():
        return
    defaultOpenDirPath = dirpath if dirpath else "."
    if self.lastOpenDir and osp.exists(self.lastOpenDir):
        defaultOpenDirPath = self.lastOpenDir
    else:
        defaultOpenDirPath = (
            osp.dirname(self.filename) if self.filename else "."
        )
    targetDirPath = str(
        QtWidgets.QFileDialog.getExistingDirectory(
            self,
            self.tr("%s - Open Directory") % __appname__,
            defaultOpenDirPath,
            QtWidgets.QFileDialog.Option.ShowDirsOnly
            | QtWidgets.QFileDialog.Option.DontResolveSymlinks,
        )
    )
    self.target_directory = targetDirPath
    self.importDirImages(targetDirPath)
    self.set_video_controls_visibility(False)
    # enable Visualization Options (tracking-only options stay disabled)
    tracking_only = (self.id_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit)
    for option in self.vis_options:
        option.setEnabled(option not in tracking_only)
@property
def imageList(self):
    """All file paths currently shown in the file list widget."""
    widget = self.fileListWidget
    return [widget.item(i).text() for i in range(widget.count())]
def importDroppedImageFiles(self, imageFiles):
    """Add drag-and-dropped image files to the file list, then open the next image."""
    supported = tuple(
        ".%s" % fmt.data().decode().lower()
        for fmt in QtGui.QImageReader.supportedImageFormats()
    )
    self.filename = None
    for file in imageFiles:
        # Skip duplicates and anything that is not a readable image format.
        if file in self.imageList or not file.lower().endswith(supported):
            continue
        label_file = osp.splitext(file)[0] + ".json"
        if self.output_dir:
            label_file = osp.join(self.output_dir, osp.basename(label_file))
        item = QtWidgets.QListWidgetItem(file)
        # item.setFlags(Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable)
        # A check mark means a valid label file already exists for the image.
        has_labels = QtCore.QFile.exists(label_file) and LabelFile.is_label_file(
            label_file)
        item.setCheckState(
            Qt.CheckState.Checked if has_labels else Qt.CheckState.Unchecked)
        self.fileListWidget.addItem(item)
    self.openNextImg()
def importDirImages(self, dirpath, pattern=None, load=True):
    """Populate the file list with every image under *dirpath* and open the first.

    Args:
        dirpath: directory to scan recursively for images.
        pattern: optional substring filter on file names.
        load: forwarded to openNextImg().
    """
    self.actions.export.setEnabled(True)
    if not self.mayContinue() or not dirpath:
        return
    self.reset_for_new_mode("dir")
    self.lastOpenDir = dirpath
    self.filename = None
    self.fileListWidget.clear()
    self.uniqLabelList.clear()
    for filename in self.scanAllImages(dirpath):
        if pattern and pattern not in filename:
            continue
        label_file = osp.splitext(filename)[0] + ".json"
        if self.output_dir:
            label_file = osp.join(self.output_dir, osp.basename(label_file))
        entry = QtWidgets.QListWidgetItem(filename)
        # item.setFlags(Qt.ItemFlag.ItemIsEnabled | Qt.ItemFlag.ItemIsSelectable)
        # A check mark means a valid label file already exists for the image.
        already_labeled = QtCore.QFile.exists(label_file) and LabelFile.is_label_file(
            label_file)
        entry.setCheckState(
            Qt.CheckState.Checked if already_labeled else Qt.CheckState.Unchecked)
        self.fileListWidget.addItem(entry)
    self.openNextImg(load=load)
    scrollbar = self.fileListWidget.horizontalScrollBar()
    scrollbar.setValue(scrollbar.maximum())
def scanAllImages(self, folderPath):
    """Recursively collect all readable image paths under *folderPath*.

    Returns:
        list[str]: paths sorted case-insensitively.
    """
    supported = tuple(
        ".%s" % fmt.data().decode().lower()
        for fmt in QtGui.QImageReader.supportedImageFormats()
    )
    found = [
        osp.join(root, name)
        for root, _dirs, names in os.walk(folderPath)
        for name in names
        if name.lower().endswith(supported)
    ]
    found.sort(key=str.lower)
    return found
def refresh_image_MODE(self, fromSignal=False):
    """Redraw the current image with its annotations (image/dir modes).

    Best-effort: failures (e.g. no image loaded yet) are ignored.

    Args:
        fromSignal: when True, allow the refresh even in video mode.
    """
    try:
        if self.current_annotation_mode == "video" and not fromSignal:
            return
        self.CURRENT_SHAPES_IN_IMG = mathOps.convert_qt_shapes_to_shapes(
            self.canvas.shapes)
        imageX = visualizations.draw_bb_on_image_MODE(
            self.CURRENT_ANNOATAION_FLAGS,
            self.image,
            self.CURRENT_SHAPES_IN_IMG)
        self.labelList.clear()
        self.canvas.loadPixmap(QtGui.QPixmap.fromImage(imageX))
        self.loadLabels(self.CURRENT_SHAPES_IN_IMG)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit are
        # no longer swallowed; the refresh stays best-effort.
        pass
def annotate_one(self, called_from_tracking=False):
    """Run inference on the current image/frame and display the results.

    Args:
        called_from_tracking: in video mode, return the raw shapes to the
            tracking loop instead of drawing them on the canvas.

    Returns:
        The detected shapes when called from tracking; otherwise None
        (results are drawn onto the canvas instead).
    """
    # A tracking area is active once its polygon has more than two points;
    # inference then runs only on the (padded) crop of that area.
    areaFlag = len(self.canvas.tracking_area_polygon) > 2
    if areaFlag:
        dims = self.CURRENT_FRAME_IMAGE.shape
        area_points = self.canvas.tracking_area_polygon
        [x1, y1, x2, y2] = mathOps.track_area_adjustedBboex(
            area_points, dims, ratio=0.1)
        targetImage = self.CURRENT_FRAME_IMAGE[y1: y2, x1: x2]
    else:
        targetImage = self.CURRENT_FRAME_IMAGE
    try:
        if self.current_annotation_mode != "video":
            if os.path.exists(self.filename):
                self.labelList.clearSelection()
        if self.multi_model_flag:
            shapes = self.intelligenceHelper.get_shapes_of_one(
                targetImage, img_array_flag=True, multi_model_flag=True)
        else:
            shapes = self.intelligenceHelper.get_shapes_of_one(
                targetImage, img_array_flag=True)
        if areaFlag:
            # Map crop-relative coordinates back into full-image space.
            shapes = mathOps.adjust_shapes_to_original_image(
                shapes, x1, y1, area_points)
        if self.current_annotation_mode == "video" and called_from_tracking:
            return shapes
    except Exception as e:
        MsgBox.OKmsgBox("Error", f"{e}", "critical")
        return
    imageX = visualizations.draw_bb_on_image_MODE(self.CURRENT_ANNOATAION_FLAGS,
                                                  self.image,
                                                  shapes)
    # clear shapes already in lablelist (fixes saving multiple shapes of same object bug)
    self.labelList.clear()
    self.CURRENT_SHAPES_IN_IMG = shapes
    self.canvas.loadPixmap(QtGui.QPixmap.fromImage(imageX))
    self.loadLabels(self.CURRENT_SHAPES_IN_IMG)
    self.actions.editMode.setEnabled(True)
    self.actions.undoLastPoint.setEnabled(False)
    self.actions.undo.setEnabled(True)
    self.setDirty()
def annotate_batch(self):
    """Run the segmentation model over every image currently in the file list."""
    self._config = get_config()
    # Notification context: (mute flag, parent window, popup callable).
    notif = [self._config["mute"], self, notification.PopUp]
    images = list(self.imageList)
    if self.multi_model_flag:
        self.intelligenceHelper.get_shapes_of_batch(
            images, multi_model_flag=True, notif=notif)
    else:
        self.intelligenceHelper.get_shapes_of_batch(images, notif=notif)
def setConfThreshold(self):
    """Ask the user for a confidence threshold and store it on the helper."""
    current = self.intelligenceHelper.conf_threshold
    # Pass the existing value as the dialog default when one is set;
    # otherwise let the dialog use its own default.
    if current:
        self.intelligenceHelper.conf_threshold = (
            self.segmentation_options_UI.setConfThreshold(current))
    else:
        self.intelligenceHelper.conf_threshold = (
            self.segmentation_options_UI.setConfThreshold())
def setIOUThreshold(self):
    """Ask the user for an IoU threshold and store it on the helper."""
    current = self.intelligenceHelper.iou_threshold
    # Pass the existing value as the dialog default when one is set;
    # otherwise let the dialog use its own default.
    if current:
        self.intelligenceHelper.iou_threshold = (
            self.segmentation_options_UI.setIOUThreshold(current))
    else:
        self.intelligenceHelper.iou_threshold = (
            self.segmentation_options_UI.setIOUThreshold())
def selectClasses(self):
    """Let the user pick which classes the model should detect."""
    print(" from intelligenceHelper:" + str(self.intelligenceHelper.selectedclasses))
    self.intelligenceHelper.selectedclasses = self.segmentation_options_UI.selectClasses()
def mergeSegModels(self):
    """Let the user pick several models to merge; enable multi-model mode if any."""
    print(" from intelligenceHelper:" + str(self.intelligenceHelper.selectedmodels))
    chosen = self.merge_feature_UI.mergeSegModels()
    self.intelligenceHelper.selectedmodels = chosen
    if len(chosen) == 0:
        print("No models selected")
    else:
        self.multi_model_flag = True
def Segment_anything(self):
    """Toggle the visibility of the SAM toolbar."""
    self.set_sam_toolbar_visibility(not self.sam_toolbar.isVisible())
# VIDEO PROCESSING FUNCTIONS (ALL CONNECTED TO THE VIDEO PROCESSING TOOLBAR)
def calculate_trajectories(self, frames=None):
    """
    Summary:
        Calculate trajectories for all objects in the video
    Args:
        frames (list): list of frames to calculate trajectories for (default: None -> all frames)
    """
    listObj = self.load_objects_from_json__orjson()
    if len(listObj) == 0:
        return
    frames = frames if frames else range(len(listObj))
    trajectories = self.CURRENT_ANNOATAION_TRAJECTORIES
    for i in frames:
        listobjframe = listObj[i]['frame_idx']
        for obj in listObj[i]['frame_data']:
            track_id = obj['tracker_id']
            self.minID = min(self.minID, track_id - 1)
            self.rec_frame_for_id(track_id, listobjframe)
            label = obj['class_name']
            # Deterministic per-label color: ASCII sum modulo palette size.
            color = color_palette[sum(ord(c) for c in label) % len(color_palette)]
            center = mathOps.centerOFmass(obj['segment'])
            key = 'id_' + str(track_id)
            if key not in trajectories:
                # First sighting of this id: one slot per video frame,
                # (-1, -1) marking "object not present in that frame".
                trajectories[key] = [(-1, -1)] * int(self.TOTAL_VIDEO_FRAMES)
            centers_rec = trajectories[key]
            # Smooth with the previous frame's center when both centers are
            # valid. (This replaces the old deliberate `5 / 0` exception
            # used as control flow to skip smoothing.)
            try:
                (xp, yp) = centers_rec[listobjframe - 2]
                (xn, yn) = center
                if xp != -1 and xn != -1:
                    r = 0.5
                    center = (int(r * xn + (1 - r) * xp),
                              int(r * yn + (1 - r) * yp))
            except Exception:
                pass
            centers_rec[listobjframe - 1] = center
            trajectories[key] = centers_rec
            trajectories['id_color_' + str(track_id)] = color
def right_click_menu(self):
    """
    Summary:
        Set the right click menu according to the current annotation mode
    """
    self.set_sam_toolbar_enable(False)
    self.sam_model_comboBox.setCurrentIndex(0)
    self.sam_buttons_colors("x")
    # Indices into self.actions.menu:
    # 0 createMode, 1 editMode, 2 edit, 3 enhance, 4 interpolate,
    # 5 mark_as_key, 6 remove_all_keyframes, 7 scale, 8 copyShapes,
    # 9 pasteShapes, 10 copy, 11 delete, 12 undo, 13 undoLastPoint,
    # 14 addPointToEdge, 15 removePoint, 16 update_curr_frame,
    # 17 ignore_changes
    video_menu_list = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 14, 15, 16, 17]
    image_menu_list = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15]
    indices = (video_menu_list
               if self.current_annotation_mode == "video"
               else image_menu_list)
    # Rebuild both the canvas context menu and the Edit menu from scratch.
    for menu in (self.canvas.menus[0], self.menus.edit):
        menu.clear()
        utils.addActions(menu, (self.actions.menu[i] for i in indices))
def reset_for_new_mode(self, mode):
    """Reset all per-session state before switching annotation modes.

    Args:
        mode: the new mode (e.g. "video", "dir"), stored on both the window
            and the canvas.
    """
    self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30,
                                            'alpha': 0.70}
    self.key_frames.clear()
    self.id_frames_rec.clear()
    # Bug fix: iterate over a copy — deleteShape() mutates canvas.shapes,
    # and deleting while iterating the live list skips every other shape.
    for shape in list(self.canvas.shapes):
        self.canvas.deleteShape(shape)
    self.resetState()
    self.CURRENT_SHAPES_IN_IMG = []
    self.image = QtGui.QImage()
    self.CURRENT_FRAME_IMAGE = None
    self.current_annotation_mode = mode
    self.canvas.current_annotation_mode = mode
    self.right_click_menu()
    self.global_listObj = []
    self.minID = -2
    self.maxID = 0
def openVideo(self):
    """Open a video file, reset state, and set up the video-annotation UI."""
    # enable export if json file exists
    try:
        # Close any leftover OpenCV preview window from a previous run.
        cv2.destroyWindow('video processing')
    except:
        pass
    if not self.mayContinue():
        return
    videoFile = QtWidgets.QFileDialog.getOpenFileName(
        self, self.tr("%s - Open Video") % __appname__, ".",
        self.tr("Video files (*.mp4 *.avi *.mov)")
    )
    if videoFile[0]:
        # clear the file list widget
        self.fileListWidget.clear()
        self.uniqLabelList.clear()
        self.reset_for_new_mode("video")
        # NOTE(review): splitting on "." assumes exactly one dot in the
        # path — a name like "my.video.mp4" would break this. Confirm.
        self.CURRENT_VIDEO_NAME = videoFile[0].split(
            ".")[-2].split("/")[-1]
        self.CURRENT_VIDEO_PATH = "/".join(
            videoFile[0].split(".")[-2].split("/")[:-1])
        json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
        # Export is only meaningful when tracking results already exist.
        if os.path.exists(json_file_name):
            self.actions.export.setEnabled(True)
        else:
            self.actions.export.setEnabled(False)
        cap = cv2.VideoCapture(videoFile[0])
        self.CURRENT_VIDEO_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        self.CURRENT_VIDEO_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        self.CAP = cap
        self.TOTAL_VIDEO_FRAMES = int(
            self.CAP.get(cv2.CAP_PROP_FRAME_COUNT))
        self.CURRENT_VIDEO_FPS = self.CAP.get(cv2.CAP_PROP_FPS)
        self.main_video_frames_slider.setMaximum(self.TOTAL_VIDEO_FRAMES)
        self.frames_to_track_slider.setMaximum(
            self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME)
        # Bounce the slider (2 then 1) to force a value-changed signal that
        # loads the first frame.
        self.main_video_frames_slider.setValue(2)
        self.INDEX_OF_CURRENT_FRAME = 1
        self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME)
        # self.addToolBarBreak
        self.set_video_controls_visibility(True)
        self.update_tracking_method()
        self.calculate_trajectories()
        # Seed maxID from ids already recorded ("id_<n>" keys -> n).
        keys = list(self.id_frames_rec.keys())
        idsORG = [int(keys[i][3:]) for i in range(len(keys))]
        if len(idsORG) > 0:
            self.maxID = max(idsORG)
        for option in self.vis_options:
            option.setEnabled(True)
        # disable save and save as
        self.actions.save.setEnabled(False)
        self.actions.saveAs.setEnabled(False)
def openVideoFrames(self):
    """Extract frames from a video via the helper dialog and load them as a directory."""
    try:
        extractor = utils.VideoFrameExtractor(
            self._config["mute"], notification.PopUp)
        extractor.exec()
        dir_path_name = extractor.path_name
        if dir_path_name:
            self.target_directory = dir_path_name
            self.importDirImages(dir_path_name)
            self.set_video_controls_visibility(False)
            # Enable image-mode visualization options; id/trajectory
            # controls only apply to video mode.
            for option in self.vis_options:
                video_only = option in [self.id_checkBox,
                                        self.traj_checkBox,
                                        self.trajectory_length_lineEdit]
                option.setEnabled(not video_only)
    except Exception as e:
        MsgBox.OKmsgBox("Error", f"Error: {e}", "critical")
def load_shapes_for_video_frame(self, json_file_name, index):
    """Load the shapes of video frame *index* from the tracking-results JSON.

    Populates self.CURRENT_SHAPES_IN_IMG with one dict per tracked object.

    Args:
        json_file_name: unused here (the JSON is read via
            load_objects_from_json__orjson()); kept for caller compatibility.
        index: 1-based frame index.
    """
    target_frame_idx = index
    listObj = self.load_objects_from_json__orjson()
    # (Removed a pointless `np.array(listObj)` conversion: the list of dicts
    # was only ever indexed, so wrapping it in an object array cost O(n)
    # for nothing.)
    shapes = []
    i = target_frame_idx - 1
    frame_objects = listObj[i]['frame_data']
    for object_ in frame_objects:
        shape = {}
        shape["label"] = object_["class_name"]
        shape["group_id"] = (object_['tracker_id'])
        shape["content"] = (object_['confidence'])
        shape["bbox"] = object_['bbox']
        points = object_['segment']
        # NOTE(review): int16 overflows for coordinates > 32767 — fine for
        # typical video resolutions, but verify for very large frames.
        points = np.array(points, np.int16).flatten().tolist()
        shape["points"] = points
        shape["shape_type"] = "polygon"
        shape["other_data"] = {}
        shape["flags"] = {}
        shapes.append(shape)
    self.CURRENT_SHAPES_IN_IMG = shapes
def loadFramefromVideo(self, frame_array, index=1):
    """Display one video frame (BGR ndarray) on the canvas with its annotations.

    Args:
        frame_array: the frame as read by OpenCV (BGR channel order).
        index: 1-based frame index, used to look up saved annotations.
    """
    self.resetState()
    self.canvas.setEnabled(False)
    self.imageData = frame_array.data
    self.CURRENT_FRAME_IMAGE = frame_array
    # BGR888 matches OpenCV's channel order, so no conversion is needed.
    image = QtGui.QImage(self.imageData, self.imageData.shape[1], self.imageData.shape[0],
                         QtGui.QImage.Format.Format_BGR888)
    self.image = image
    if self._config["keep_prev"]:
        prev_shapes = self.canvas.shapes
    flags = {k: False for k in self._config["flags"] or []}
    self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
    if self.TrackingMode:
        # While tracking, shapes are already in memory — draw them directly.
        image = self.draw_bb_on_image(image, self.CURRENT_SHAPES_IN_IMG)
        self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
        if len(self.CURRENT_SHAPES_IN_IMG) > 0:
            self.loadLabels(self.CURRENT_SHAPES_IN_IMG)
    else:
        if self.labelFile:
            # Shapes were loaded from a label file (e.g. after editing).
            self.CURRENT_SHAPES_IN_IMG = self.labelFile.shapes
            image = self.draw_bb_on_image(
                image, self.CURRENT_SHAPES_IN_IMG)
            self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
            self.loadLabels(self.labelFile.shapes)
            if self.labelFile.flags is not None:
                flags.update(self.labelFile.flags)
        else:
            # Otherwise, pull this frame's shapes from the tracking JSON.
            json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
            if os.path.exists(json_file_name):
                self.load_shapes_for_video_frame(json_file_name, index)
                image = self.draw_bb_on_image(
                    image, self.CURRENT_SHAPES_IN_IMG)
                self.canvas.loadPixmap(QtGui.QPixmap.fromImage(image))
                if len(self.CURRENT_SHAPES_IN_IMG) > 0:
                    self.loadLabels(self.CURRENT_SHAPES_IN_IMG)
    self.loadFlags(flags)
    self.setClean()
    self.canvas.setEnabled(True)
    # set zoom values
    is_initial_load = not self.zoom_values
    if self.filename in self.zoom_values:
        self.zoomMode = self.zoom_values[self.filename][0]
        self.setZoom(self.zoom_values[self.filename][1])
    elif is_initial_load or not self._config["keep_prev_scale"]:
        self.adjustScale(initial=True)
    # set scroll values
    self.paintCanvas()
    self.toggleActions(True)
    self.canvas.setFocus()
    self.status(self.tr(
        f'Loaded {self.CURRENT_VIDEO_NAME} frame {self.INDEX_OF_CURRENT_FRAME}'))
def nextFrame_buttonClicked(self):
    """Jump forward by FRAMES_TO_SKIP frames, clamped to the last frame."""
    self.update_current_frame_annotation_button_clicked()
    target = min(self.INDEX_OF_CURRENT_FRAME + self.FRAMES_TO_SKIP,
                 self.TOTAL_VIDEO_FRAMES)
    self.main_video_frames_slider.setValue(target)
def next_1_Frame_buttonClicked(self):
    """Advance by exactly one frame, clamped to the last frame."""
    self.update_current_frame_annotation_button_clicked()
    target = min(self.INDEX_OF_CURRENT_FRAME + 1, self.TOTAL_VIDEO_FRAMES)
    self.main_video_frames_slider.setValue(target)
def previousFrame_buttonClicked(self):
    """Jump backward by FRAMES_TO_SKIP frames, clamped at zero."""
    self.update_current_frame_annotation_button_clicked()
    target = max(self.INDEX_OF_CURRENT_FRAME - self.FRAMES_TO_SKIP, 0)
    self.main_video_frames_slider.setValue(target)
def previous_1_Frame_buttonclicked(self):
    """Step back by exactly one frame, clamped at zero."""
    self.update_current_frame_annotation_button_clicked()
    target = max(self.INDEX_OF_CURRENT_FRAME - 1, 0)
    self.main_video_frames_slider.setValue(target)
def frames_to_skip_slider_changed(self):
    """Sync FRAMES_TO_SKIP with the slider and refresh its zero-padded label."""
    self.FRAMES_TO_SKIP = self.frames_to_skip_slider.value()
    # Pad to three digits so the label keeps a constant width
    # (log10 counts the digits of the current value).
    pad = (2 - int(np.log10(self.FRAMES_TO_SKIP + 0.9))) * '0'
    self.frames_to_skip_label.setText(
        'Jump forward/backward frames: ' + pad + str(self.FRAMES_TO_SKIP))
def playPauseButtonClicked(self):
    """Toggle playback: start a 40 ms timer that advances frames, or stop it."""
    # we can check the state of the button by checking the button text
    if self.playPauseButton_mode == "Play":
        self.playPauseButton_mode = "Pause"
        self.playPauseButton.setShortcut(self._config['shortcuts']['play'])
        # NOTE(review): the tooltip texts in the two branches look swapped
        # relative to the icons (this one says "Play" but shows the pause
        # icon) — confirm which pairing is intended before changing.
        self.playPauseButton.setToolTip(
            f'Play ({self._config["shortcuts"]["play"]})')
        self.playPauseButton.setIcon(
            self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPause))
        # play the video at the current fps untill the user clicks pause
        self.play_timer = QtCore.QTimer(self)
        # use play_timer.timeout.connect to call a function every time the timer times out
        # but we need to call the function every interval of time
        # so we need to call the function every 1/fps seconds
        self.play_timer.timeout.connect(self.move_frame_by_frame)
        # note that the timer interval is in milliseconds (40 ms ~ 25 fps)
        self.play_timer.start(40)
        # while self.timer.isActive():
    elif self.playPauseButton_mode == "Pause":
        # first stop the timer
        self.play_timer.stop()
        self.playPauseButton_mode = "Play"
        self.playPauseButton.setShortcut(self._config['shortcuts']['play'])
        self.playPauseButton.setToolTip(
            f'Pause ({self._config["shortcuts"]["play"]})')
        self.playPauseButton.setIcon(
            self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPlay))
def move_frame_by_frame(self):
    """Advance playback by one frame (timer callback while the video plays)."""
    QtWidgets.QApplication.processEvents()
    self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME + 1)
def main_video_frames_slider_changed(self):
    """Seek the video to the slider's frame, update labels, and display it."""
    if self.current_annotation_mode != "video":
        return
    # Clear any unfinished SAM annotation before leaving the frame.
    if self.sam_model_comboBox.currentIndex() != 0 and self.canvas.SAM_mode != "finished" and not self.TrackingMode:
        self.sam_clear_annotation_button_clicked()
        self.sam_buttons_colors("X")
    # Guard: CURRENT_VIDEO_PATH only exists after a video has been opened.
    try:
        x = self.CURRENT_VIDEO_PATH
    except:
        return
    frame_idx = self.main_video_frames_slider.value()
    self.INDEX_OF_CURRENT_FRAME = frame_idx
    # Seek is 0-based while frame_idx is 1-based.
    self.CAP.set(cv2.CAP_PROP_POS_FRAMES, frame_idx - 1)
    # setting text of labels
    fps = self.CAP.get(cv2.CAP_PROP_FPS)
    # Zero-pad the frame number so the label width stays constant.
    zeros = (int(np.log10(self.TOTAL_VIDEO_FRAMES + 0.9)) -
             int(np.log10(frame_idx + 0.9))) * '0'
    self.main_video_frames_label_1.setText(
        f'frame {zeros}{frame_idx} / {int(self.TOTAL_VIDEO_FRAMES)}')
    self.frame_time = mathOps.mapFrameToTime(frame_idx, fps)
    frame_text = ("%02d:%02d:%02d:%03d" % (
        self.frame_time[0], self.frame_time[1], self.frame_time[2], self.frame_time[3]))
    video_duration = mathOps.mapFrameToTime(self.TOTAL_VIDEO_FRAMES, fps)
    video_duration_text = ("%02d:%02d:%02d:%03d" % (
        video_duration[0], video_duration[1], video_duration[2], video_duration[3]))
    final_text = frame_text + " / " + video_duration_text
    self.main_video_frames_label_2.setText(f'time {final_text}')
    # reading the current frame from the video and loading it into the canvas
    success, img = self.CAP.read()
    if success:
        frame_array = np.array(img)
        self.loadFramefromVideo(frame_array, frame_idx)
    else:
        pass
    # Re-clamp the tracking range to the frames remaining after this one.
    self.frames_to_track_slider.setMaximum(
        self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME)
def frames_to_track_input_changed(self, text):
    """Clamp the typed frame count into the slider's valid range and apply it."""
    try:
        value = int(text)
    except ValueError:
        # Non-numeric input is ignored.
        return
    upper = self.frames_to_track_slider.maximum()
    if value < 2:
        self.frames_to_track_slider.setValue(1)
    else:
        self.frames_to_track_slider.setValue(min(value, upper))
def frames_to_track_slider_changed(self, value):
    """Mirror the slider value into the text box and FRAMES_TO_TRACK."""
    self.frames_to_track_input.setText(str(value))
    self.FRAMES_TO_TRACK = self.frames_to_track_slider.value()
def track_assigned_objects_button_clicked(self):
    """Track only the objects currently selected in the label list."""
    # Require at least one selected object before starting.
    if not self.labelList.selectedItems():
        self.errorMessage(
            "found No objects to track",
            "you need to assign at least one object to track",
        )
        return
    # Flag the tracking loop to filter by the selected ids, then reset it.
    self.TRACK_ASSIGNED_OBJECTS_ONLY = True
    self.track_buttonClicked()
    self.TRACK_ASSIGNED_OBJECTS_ONLY = False
def update_gui_after_tracking(self, index):
    """Advance the slider after tracking a frame (except the last) and keep the UI responsive."""
    on_last_frame = (index == self.FRAMES_TO_TRACK - 1)
    if not on_last_frame:
        self.main_video_frames_slider.setValue(
            self.INDEX_OF_CURRENT_FRAME + 1)
    QtWidgets.QApplication.processEvents()
def certain_area_clicked(self, index):
    """Handle the tracking-area dropdown: 0 clears the area, otherwise start drawing one."""
    self.canvas.cancelManualDrawing()
    self.setEditMode()
    self.canvas.setCursor(QtGui.QCursor(QtCore.Qt.CursorShape.CrossCursor))
    # Either way the polygon is reset; only the mode string differs.
    self.canvas.tracking_area = "" if index == 0 else "drawing"
    self.canvas.tracking_area_polygon = []
def track_dropdown_changed(self, index):
    """Remember the selected tracking scope (0: all, 1: assigned, 2: full video)."""
    self.selected_option = index
def start_tracking_button_clicked(self):
    """Dispatch the selected tracking scope; fall back to plain tracking on error."""
    try:
        try:
            if self.selected_option == 0:
                self.track_buttonClicked()
            elif self.selected_option == 1:
                self.track_assigned_objects_button_clicked()
            elif self.selected_option == 2:
                self.track_full_video_button_clicked()
        except Exception as e:
            # NOTE(review): ANY failure above — including one raised midway
            # through assigned/full-video tracking — silently retries plain
            # tracking. Confirm this broad fallback is intentional.
            self.track_buttonClicked()
    except Exception as e:
        MsgBox.OKmsgBox("Error", f"Error: {e}", "critical")
def track_buttonClicked(self):
    """Track objects over the next FRAMES_TO_TRACK frames and persist results.

    Reuses the shapes already on the canvas for the first frame when present,
    otherwise runs the detector per frame; detections are fed to the tracker
    and one JSON record per frame is written to the tracking-results file.
    """
    # Disable Exports & show progress while tracking runs.
    self.actions.export.setEnabled(False)
    self.tracking_progress_bar.setVisible(True)
    # first we need to check there is a json file with the same name as the video
    listObj = self.load_objects_from_json__orjson()
    # If the canvas already has shapes, reuse them for the first frame and
    # remember their ids so "track assigned objects" can filter on them.
    existing_annotation = False
    shapes = self.canvas.shapes
    tracks_to_follow = None
    if len(shapes) > 0:
        existing_annotation = True
        tracks_to_follow = []
        for shape in shapes:
            if shape.group_id is not None:
                tracks_to_follow.append(int(shape.group_id))
    self.TrackingMode = True
    curr_frame, prev_frame = None, None
    # Clamp the number of frames to the end of the video.
    if self.FRAMES_TO_TRACK + self.INDEX_OF_CURRENT_FRAME <= self.TOTAL_VIDEO_FRAMES:
        number_of_frames_to_track = self.FRAMES_TO_TRACK
    else:
        number_of_frames_to_track = self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME
    self.interrupted = False
    for i in range(number_of_frames_to_track):
        QtWidgets.QApplication.processEvents()
        if self.interrupted:
            self.interrupted = False
            break
        # Checkpoint the JSON every 100 frames so a crash loses little work.
        if i % 100 == 0:
            self.load_objects_to_json__orjson(listObj)
        self.tracking_progress_bar.setValue(
            int((i + 1) / number_of_frames_to_track * 100))
        if existing_annotation:
            existing_annotation = False
            shapes = self.canvas.shapes
            shapes = mathOps.convert_qt_shapes_to_shapes(shapes)
        else:
            with torch.no_grad():
                shapes = self.annotate_one(called_from_tracking=True)
        curr_frame = self.CURRENT_FRAME_IMAGE
        if len(shapes) == 0:
            self.update_gui_after_tracking(i)
            continue
        for shape in shapes:
            if shape['content'] is None:
                shape['content'] = 1.0
        boxes, confidences, class_ids, segments = mathOps.get_boxes_conf_classids_segments(
            shapes)
        boxes = np.array(boxes, dtype=int)
        confidences = np.array(confidences)
        class_ids = np.array(class_ids)
        detections = Detections(
            xyxy=boxes,
            confidence=confidences,
            class_id=class_ids,
        )
        # The tracker expects a (N, 6) float32 tensor:
        # x1, y1, x2, y2, confidence, class_id.
        boxes = torch.from_numpy(detections.xyxy)
        confidences = torch.from_numpy(detections.confidence)
        class_ids = torch.from_numpy(detections.class_id)
        dets = torch.cat((boxes, confidences.unsqueeze(
            1), class_ids.unsqueeze(1)), dim=1)
        dets = dets.to(torch.float32)
        if hasattr(self.tracker, 'tracker') and hasattr(self.tracker.tracker, 'camera_update'):
            if prev_frame is not None and curr_frame is not None:  # camera motion compensation
                self.tracker.tracker.camera_update(prev_frame, curr_frame)
        prev_frame = curr_frame
        with torch.no_grad():
            org_tracks = self.tracker.update(
                dets.cpu(), self.CURRENT_FRAME_IMAGE)
        tracks = []
        for org_track in org_tracks:
            track = []
            # Bug fix: this inner loop used to reuse `i`, clobbering the
            # outer frame counter so update_gui_after_tracking(i) below
            # received 5 instead of the frame index.
            for k in range(6):
                track.append(int(org_track[k]))
            # Offset new tracker ids past the ids already in the video.
            track[4] += int(self.maxID)
            track.append(org_track[6])
            tracks.append(track)
        matched_shapes, unmatched_shapes = mathOps.match_detections_with_tracks(
            shapes, tracks)
        shapes = matched_shapes
        self.CURRENT_SHAPES_IN_IMG = [
            shape_ for shape_ in shapes if shape_["group_id"] is not None]
        if self.TRACK_ASSIGNED_OBJECTS_ONLY and tracks_to_follow is not None:
            try:
                if len(self.labelList.selectedItems()) != 0:
                    tracks_to_follow = []
                    for item in self.labelList.selectedItems():
                        x = item.text()
                        # Label text contains "ID <n>:"; extract <n>.
                        i1, i2 = x.find('D'), x.find(':')
                        tracks_to_follow.append(int(x[i1 + 2:i2]))
                self.CURRENT_SHAPES_IN_IMG = [
                    shape_ for shape_ in shapes if shape_["group_id"] in tracks_to_follow]
            except Exception:
                # this happens when the user selects a label that is not a tracked object so there is error in extracting the tracker id
                # show a message box to the user (hinting to use the tracker on the image first so that the label has a tracker id to be selected)
                self.errorMessage(
                    'Error', 'Please use the tracker on the image first so that you can select labels with IDs to track')
                return
        # to understand the json output file structure it is a dictionary of frames and each frame is a dictionary of tracker_ids and each tracker_id is a dictionary of bbox , confidence , class_id , segment
        json_frame = {}
        json_frame.update({'frame_idx': self.INDEX_OF_CURRENT_FRAME})
        json_frame_object_list = []
        for shape in self.CURRENT_SHAPES_IN_IMG:
            self.rec_frame_for_id(
                int(shape["group_id"]), self.INDEX_OF_CURRENT_FRAME, type_='add')
            json_tracked_object = {}
            json_tracked_object['tracker_id'] = int(shape["group_id"])
            json_tracked_object['bbox'] = [int(b) for b in shape['bbox']]
            json_tracked_object['confidence'] = shape["content"]
            json_tracked_object['class_name'] = shape["label"]
            json_tracked_object['class_id'] = coco_classes.index(
                shape["label"]) if shape["label"] in coco_classes else -1
            points = shape["points"]
            segment = [[int(points[z]), int(points[z + 1])]
                       for z in range(0, len(points), 2)]
            json_tracked_object['segment'] = segment
            json_frame_object_list.append(json_tracked_object)
        json_frame.update({'frame_data': json_frame_object_list})
        listObj[self.INDEX_OF_CURRENT_FRAME - 1] = json_frame
        QtWidgets.QApplication.processEvents()
        self.update_gui_after_tracking(i)
        print('finished tracking for frame ', self.INDEX_OF_CURRENT_FRAME)
    self.load_objects_to_json__orjson(listObj)
    # Notify the user that the tracking is finished
    self._config = get_config()
    if not self._config["mute"]:
        if not self.isActiveWindow():
            notification.PopUp("Tracking Completed")
    self.TrackingMode = False
    self.labelFile = None
    # Nudge the slider back and forth to force a reload of the current frame.
    self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME - 1)
    self.main_video_frames_slider.setValue(self.INDEX_OF_CURRENT_FRAME)
    self.tracking_progress_bar.hide()
    self.tracking_progress_bar.setValue(0)
    # Enable Exports & Restore button Text and Color
    self.actions.export.setEnabled(True)
def track_full_video_button_clicked(self):
    """Track from the current frame to the end of the video."""
    remaining = int(self.TOTAL_VIDEO_FRAMES - self.INDEX_OF_CURRENT_FRAME)
    self.FRAMES_TO_TRACK = remaining
    self.track_buttonClicked()
def set_video_controls_visibility(self, visible=False):
    """Show or hide both video-control toolbars and all of their child widgets."""
    for toolbar in (self.videoControls, self.videoControls_2):
        toolbar.setVisible(visible)
        for child in toolbar.children():
            # Some children are layouts/objects without setVisible; skip them.
            try:
                child.setVisible(visible)
            except:
                pass
def traj_checkBox_changed(self):
    """Toggle trajectory drawing and refresh the current frame (best-effort)."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["traj"] = self.traj_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort
        # (e.g. no video loaded yet).
        pass
def mask_checkBox_changed(self):
    """Toggle mask drawing; refresh the video frame and the image-mode view."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["mask"] = self.mask_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort.
        pass
    self.refresh_image_MODE()
def class_checkBox_changed(self):
    """Toggle class-name labels; refresh the video frame and the image-mode view."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["class"] = self.class_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort.
        pass
    self.refresh_image_MODE()
def conf_checkBox_changed(self):
    """Toggle confidence labels; refresh the video frame and the image-mode view."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["conf"] = self.conf_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort.
        pass
    self.refresh_image_MODE()
def id_checkBox_changed(self):
    """Toggle tracker-id labels and refresh the current video frame (video-only option)."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["id"] = self.id_checkBox.isChecked()
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort.
        pass
def bbox_checkBox_changed(self):
    """Toggle bounding boxes; refresh the video frame and the image-mode view."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["bbox"] = self.bbox_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        self.main_video_frames_slider_changed()
    except Exception:
        # Narrowed from bare `except:`; refresh is best-effort.
        pass
    self.refresh_image_MODE()
def polygons_visable_checkBox_changed(self):
    """Toggle polygon visibility for every shape on the canvas (best-effort)."""
    try:
        self.CURRENT_ANNOATAION_FLAGS["polygons"] = self.polygons_visable_checkBox.isChecked(
        )
        self.update_current_frame_annotation()
        for shape in self.canvas.shapes:
            self.canvas.setShapeVisible(
                shape, self.CURRENT_ANNOATAION_FLAGS["polygons"])
    except Exception:
        # Narrowed from bare `except:`; toggling is best-effort.
        pass
def export_as_video_button_clicked(self, output_filename=None):
    """Render the tracked annotations onto every frame and write an MP4.

    Args:
        output_filename: `False` (the Qt button's checked-state) means
            "use the default <video>_tracking_results.mp4 name and show a
            done dialog"; a string means "write to that path and return it".
            NOTE(review): when called directly with no argument, the default
            `None` passes the `is not False` check and the output path
            becomes None — confirm all call sites pass False or a path.

    Returns:
        The output filename when one was supplied; False when nothing
        was exported (all frames empty); otherwise None.
    """
    self.update_current_frame_annotation()
    input_video_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}.mp4'
    output_video_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.mp4'
    if output_filename is not False:
        output_video_file_name = output_filename
    input_cap = cv2.VideoCapture(input_video_file_name)
    output_cap = cv2.VideoWriter(output_video_file_name, cv2.VideoWriter_fourcc(
        *'mp4v'), int(self.CURRENT_VIDEO_FPS), (int(self.CURRENT_VIDEO_WIDTH), int(self.CURRENT_VIDEO_HEIGHT)))
    listObj = self.load_objects_from_json__orjson()
    # make a progress bar for exporting video (with percentage of progress) TO DO LATER
    empty_frame = False
    empty_video = True
    for target_frame_idx in range(self.TOTAL_VIDEO_FRAMES):
        try:
            self.INDEX_OF_CURRENT_FRAME = target_frame_idx + 1
            ret, image = input_cap.read()
            shapes = []
            frame_objects = listObj[target_frame_idx]['frame_data']
            for object_ in frame_objects:
                shape = {}
                shape["label"] = object_['class_name']
                shape["group_id"] = str(object_['tracker_id'])
                shape["content"] = str(object_['confidence'])
                shape["bbox"] = object_['bbox']
                points = object_['segment']
                points = np.array(points, np.int16).flatten().tolist()
                shape["points"] = points
                shape["shape_type"] = "polygon"
                shape["other_data"] = {}
                shape["flags"] = {}
                shapes.append(shape)
            # Frames with no annotations are skipped (not written).
            if len(shapes) == 0:
                if not empty_frame:
                    self.waitWindow(visible=True, text=f'Processing...')
                empty_frame = True
                continue
            self.waitWindow(
                visible=True, text=f'Please Wait.\nFrame {target_frame_idx} is being exported...')
            image = self.draw_bb_on_image(
                image, shapes, image_qt_flag=False)
            output_cap.write(image)
            empty_frame = False
            empty_video = False
        except:
            # NOTE(review): on error both captures are released but the loop
            # keeps iterating with released captures — confirm whether a
            # `break` was intended here.
            input_cap.release()
            output_cap.release()
    input_cap.release()
    output_cap.release()
    self.waitWindow()
    # Nothing was written: remove the empty output file and signal failure.
    try:
        if empty_video:
            os.remove(output_video_file_name)
            return False
    except:
        pass
    self.INDEX_OF_CURRENT_FRAME = self.main_video_frames_slider.value()
    # show message saying that the video is exported
    if output_filename is False:
        MsgBox.OKmsgBox("Export Video", "Done Exporting Video")
    if output_filename is not False:
        return output_filename
def clear_video_annotations_button_clicked(self):
    """Delete all annotations for the current video (memory + JSON file) and reload it."""
    self.global_listObj = []
    self.CURRENT_ANNOATAION_TRAJECTORIES = {'length': 30,
                                            'alpha': 0.70}
    self.key_frames.clear()
    self.id_frames_rec.clear()
    self.minID = -2
    self.maxID = 0
    # Bug fix: iterate over a copy — deleteShape() mutates canvas.shapes,
    # and deleting while iterating the live list skips every other shape.
    for shape in list(self.canvas.shapes):
        self.canvas.deleteShape(shape)
    self.CURRENT_SHAPES_IN_IMG = []
    # just delete the json file and reload the video
    # to delete the json file we need to know the name of the json file which is the same as the video name
    json_file_name = f'{self.CURRENT_VIDEO_PATH}/{self.CURRENT_VIDEO_NAME}_tracking_results.json'
    # now delete the json file if it exists
    if os.path.exists(json_file_name):
        os.remove(json_file_name)
    MsgBox.OKmsgBox("clear annotations",
                    "All video frames annotations are cleared")
    # Bounce the slider to force a reload of the first frame.
    self.main_video_frames_slider.setValue(2)
    self.main_video_frames_slider.setValue(1)
def update_current_frame_annotation_button_clicked(self):
    """Persist the current frame's annotations, clearing unfinished SAM state first."""
    sam_in_progress = (self.sam_model_comboBox.currentIndex() != 0
                       and self.canvas.SAM_mode != "finished"
                       and not self.TrackingMode)
    if sam_in_progress:
        self.sam_clear_annotation_button_clicked()
    # CURRENT_VIDEO_PATH only exists once a video is open; bail out otherwise.
    try:
        _ = self.CURRENT_VIDEO_PATH
    except:
        return
    self.update_current_frame_annotation()
    self.main_video_frames_slider_changed()
def update_current_frame_annotation(self):
    """Serialize the canvas shapes of the current frame into the
    video's tracking-results JSON (overwriting that frame's entry).

    Each shape becomes a record with tracker id, integer bbox,
    confidence (stringified), class name/id, and the polygon segment.
    No-op outside video annotation mode.
    """
    if self.current_annotation_mode != "video":
        return
    listObj = self.load_objects_from_json__orjson()
    json_frame = {}
    json_frame.update({'frame_idx': self.INDEX_OF_CURRENT_FRAME})
    json_frame_object_list = []
    shapes = mathOps.convert_qt_shapes_to_shapes(self.canvas.shapes)
    for shape in shapes:
        json_tracked_object = {}
        # shapes without a group id get a fresh negative id, counting down
        if shape["group_id"] is not None:
            json_tracked_object['tracker_id'] = int(shape["group_id"])
        else:
            json_tracked_object['tracker_id'] = self.minID
            self.minID -= 1
        bbox = shape["bbox"]
        json_tracked_object['bbox'] = [int(bbox[0]), int(bbox[1]),
                                       int(bbox[2]), int(bbox[3])]
        # confidence is stored as a string; unknown confidence defaults to 1
    
def trajectory_length_lineEdit_changed(self):
    """Apply the trajectory-length text field to the trajectory config.

    Empty text maps to a length of 1; non-numeric input is ignored
    and the previous length is kept. On success the view refreshes.
    """
    text = self.trajectory_length_lineEdit.text()
    try:
        # only the conversion can legitimately fail on user input
        self.CURRENT_ANNOATAION_TRAJECTORIES['length'] = int(
            text) if text != '' else 1
    except ValueError:
        return
    self.main_video_frames_slider_changed()
def addVideoControls(self):
    """Build the video UI: the bottom playback toolbar, the top
    tracking toolbar, and the visualization-options grid.

    Everything is constructed hidden/disabled; visibility is toggled
    when a video is opened. NOTE: several sliders are given an initial
    value BEFORE their valueChanged handler is connected and a second
    value AFTER, so that the final setValue fires the handler once to
    initialize the associated label — keep that ordering.
    """
    # ---- bottom toolbar: playback / navigation -----------------------
    self.videoControls = QtWidgets.QToolBar()
    self.videoControls.setMovable(True)
    self.videoControls.setFloatable(True)
    self.videoControls.setObjectName("videoControls")
    self.videoControls.setStyleSheet(
        "QToolBar#videoControls { border: 50px }")
    self.addToolBar(Qt.ToolBarArea.BottomToolBarArea, self.videoControls)
    # ---- top toolbar: tracking controls ------------------------------
    self.videoControls_2 = QtWidgets.QToolBar()
    self.videoControls_2.setMovable(True)
    self.videoControls_2.setFloatable(True)
    self.videoControls_2.setObjectName("videoControls_2")
    self.videoControls_2.setStyleSheet(
        "QToolBar#videoControls_2 { border: 50px }")
    self.addToolBar(Qt.ToolBarArea.TopToolBarArea, self.videoControls_2)
    # frames-to-skip slider (jump size for << / >> buttons)
    self.frames_to_skip_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
    self.frames_to_skip_slider.setMinimum(1)
    self.frames_to_skip_slider.setMaximum(100)
    self.frames_to_skip_slider.setValue(3)
    self.frames_to_skip_slider.setTickPosition(
        QtWidgets.QSlider.TickPosition.TicksBelow)
    self.frames_to_skip_slider.setTickInterval(1)
    self.frames_to_skip_slider.setMaximumWidth(250)
    self.frames_to_skip_slider.valueChanged.connect(
        self.frames_to_skip_slider_changed)
    self.frames_to_skip_label = QtWidgets.QLabel()
    self.frames_to_skip_label.setStyleSheet(
        "QLabel { font-size: 10pt; font-weight: bold; }")
    # set after connect so the handler runs and fills the label
    self.frames_to_skip_slider.setValue(30)
    self.videoControls.addWidget(self.frames_to_skip_label)
    self.videoControls.addWidget(self.frames_to_skip_slider)
    # navigation buttons: << < play/pause > >>
    self.previousFrame_button = QtWidgets.QPushButton()
    self.previousFrame_button.setText("<<")
    self.previousFrame_button.setShortcut(
        self._config['shortcuts']['prev_x'])
    self.previousFrame_button.setToolTip(
        f'Jump Backward ({self._config["shortcuts"]["prev_x"]})')
    self.previousFrame_button.clicked.connect(
        self.previousFrame_buttonClicked)
    self.previous_1_Frame_button = QtWidgets.QPushButton()
    self.previous_1_Frame_button.setText("<")
    self.previous_1_Frame_button.setShortcut(
        self._config['shortcuts']['prev_1'])
    self.previous_1_Frame_button.setToolTip(
        f'Previous Frame ({self._config["shortcuts"]["prev_1"]})')
    self.previous_1_Frame_button.clicked.connect(
        self.previous_1_Frame_buttonclicked)
    self.playPauseButton = QtWidgets.QPushButton()
    # mode flag toggles between "Play" and "Pause" in the handler
    self.playPauseButton_mode = "Play"
    self.playPauseButton.setShortcut(self._config['shortcuts']['play'])
    self.playPauseButton.setToolTip(
        f'Play ({self._config["shortcuts"]["play"]})')
    self.playPauseButton.setIcon(
        self.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_MediaPlay))
    self.playPauseButton.setIconSize(QtCore.QSize(22, 22))
    self.playPauseButton.setStyleSheet("QPushButton { margin: 5px;}")
    self.playPauseButton.pressed.connect(self.playPauseButtonClicked)
    self.nextFrame_button = QtWidgets.QPushButton()
    self.nextFrame_button.setText(">>")
    self.nextFrame_button.setShortcut(self._config['shortcuts']['next_x'])
    self.nextFrame_button.setToolTip(
        f'Jump forward ({self._config["shortcuts"]["next_x"]})')
    self.nextFrame_button.clicked.connect(self.nextFrame_buttonClicked)
    self.next_1_Frame_button = QtWidgets.QPushButton()
    self.next_1_Frame_button.setText(">")
    self.next_1_Frame_button.setShortcut(
        self._config['shortcuts']['next_1'])
    self.next_1_Frame_button.setToolTip(
        f'Next Frame ({self._config["shortcuts"]["next_1"]})')
    self.next_1_Frame_button.clicked.connect(
        self.next_1_Frame_buttonClicked)
    self.videoControls.addWidget(self.previousFrame_button)
    self.videoControls.addWidget(self.previous_1_Frame_button)
    self.videoControls.addWidget(self.playPauseButton)
    self.videoControls.addWidget(self.next_1_Frame_button)
    self.videoControls.addWidget(self.nextFrame_button)
    # main frame slider with "frame / total" labels on either side
    self.main_video_frames_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
    self.main_video_frames_slider.setMinimum(1)
    self.main_video_frames_slider.setMaximum(100)
    self.main_video_frames_slider.setValue(2)
    self.main_video_frames_slider.setTickPosition(
        QtWidgets.QSlider.TickPosition.TicksBelow)
    self.main_video_frames_slider.setTickInterval(1)
    self.main_video_frames_slider.setMaximumWidth(1000)
    self.main_video_frames_slider.valueChanged.connect(
        self.main_video_frames_slider_changed)
    self.main_video_frames_label_1 = QtWidgets.QLabel()
    self.main_video_frames_label_2 = QtWidgets.QLabel()
    self.main_video_frames_label_1.setStyleSheet(
        "QLabel { font-size: 12pt; font-weight: bold; }")
    self.main_video_frames_label_2.setStyleSheet(
        "QLabel { font-size: 12pt; font-weight: bold; }")
    # labels show current frame / total frames and current / total time
    self.videoControls.addWidget(self.main_video_frames_label_1)
    self.videoControls.addWidget(self.main_video_frames_slider)
    self.videoControls.addWidget(self.main_video_frames_label_2)
    # ---- videoControls_2 widgets -------------------------------------
    # frames-to-track slider plus a mirrored text input
    self.frames_to_track_slider = QtWidgets.QSlider(QtCore.Qt.Orientation.Horizontal)
    self.frames_to_track_slider.setMinimum(1)
    self.frames_to_track_slider.setMaximum(100)
    self.frames_to_track_slider.setValue(4)
    self.frames_to_track_slider.setTickPosition(
        QtWidgets.QSlider.TickPosition.TicksBelow)
    self.frames_to_track_slider.setTickInterval(1)
    self.frames_to_track_slider.setMaximumWidth(200)
    self.frames_to_track_slider.valueChanged.connect(
        self.frames_to_track_slider_changed)
    self.frames_to_track_input = QtWidgets.QLineEdit()
    self.frames_to_track_input.setText("4")
    self.frames_to_track_input.setStyleSheet(
        "QLineEdit { font-size: 10pt; }")
    self.frames_to_track_input.setMaximumWidth(50)
    self.frames_to_track_input.textChanged.connect(
        self.frames_to_track_input_changed)
    self.frames_to_track_label_before = QtWidgets.QLabel("Track for")
    self.frames_to_track_label_before.setStyleSheet(
        "QLabel { font-size: 10pt; font-weight: bold; }")
    self.frames_to_track_label_after = QtWidgets.QLabel("frames")
    self.frames_to_track_label_after.setStyleSheet(
        "QLabel { font-size: 10pt; font-weight: bold; }")
    self.videoControls_2.addWidget(self.frames_to_track_label_before)
    self.videoControls_2.addWidget(self.frames_to_track_input)
    self.videoControls_2.addWidget(self.frames_to_track_label_after)
    self.videoControls_2.addWidget(self.frames_to_track_slider)
    # set after connect so the handler syncs slider and input
    self.frames_to_track_slider.setValue(10)
    self.track_dropdown = QtWidgets.QComboBox()
    self.track_dropdown.addItems(
        ["Track for selected frames", "Track Only assigned objects", "Track Full Video"])
    self.track_dropdown.setCurrentIndex(0)
    self.track_dropdown.currentIndexChanged.connect(
        self.track_dropdown_changed)
    self.videoControls_2.addWidget(self.track_dropdown)
    self.start_button = QtWidgets.QPushButton("Start Tracking")
    self.start_button.setIcon(
        QtGui.QIcon("labelme/icons/start.png"))
    self.start_button.setIconSize(QtCore.QSize(24, 24))
    self.start_button.setStyleSheet(self.buttons_text_style_sheet)
    self.start_button.clicked.connect(self.start_tracking_button_clicked)
    self.videoControls_2.addWidget(self.start_button)
    self.tracking_progress_bar_label = QtWidgets.QLabel()
    self.tracking_progress_bar_label.setStyleSheet(
        "QLabel { font-size: 10pt; font-weight: bold; }")
    self.tracking_progress_bar_label.setText("Tracking Progress")
    self.videoControls_2.addWidget(self.tracking_progress_bar_label)
    self.tracking_progress_bar = QtWidgets.QProgressBar()
    self.tracking_progress_bar.setMaximumWidth(300)
    self.tracking_progress_bar.setMinimum(0)
    self.tracking_progress_bar.setMaximum(100)
    self.tracking_progress_bar.setValue(0)
    self.videoControls_2.addWidget(self.tracking_progress_bar)
    self.track_stop_button = QtWidgets.QPushButton()
    # (a second, duplicate setStyleSheet call that was immediately
    # overwritten by this one has been removed)
    self.track_stop_button.setStyleSheet(
        "QPushButton {font-size: 10pt; margin: 2px 5px; padding: 2px 7px;font-weight: bold; background-color: #FF0000; color: #FFFFFF;} QPushButton:hover {background-color: #FE4242;} QPushButton:disabled {background-color: #7A7A7A;}")
    self.track_stop_button.setText("Stop Tracking")
    self.track_stop_button.setIcon(
        QtGui.QIcon("labelme/icons/stop.png"))
    self.track_stop_button.setIconSize(QtCore.QSize(24, 24))
    # self.track_stop_button.setShortcut(self._config['shortcuts']['stop'])
    self.track_stop_button.setToolTip(
        f'Stop Tracking ({self._config["shortcuts"]["stop"]})')
    self.track_stop_button.pressed.connect(
        self.Escape_clicked)
    self.videoControls_2.addWidget(self.track_stop_button)
    # visualization flags: bbox / id / class / confidence / mask / traj
    self.bbox_checkBox = QtWidgets.QCheckBox()
    self.bbox_checkBox.setText("bbox")
    self.bbox_checkBox.setChecked(True)
    self.bbox_checkBox.stateChanged.connect(self.bbox_checkBox_changed)
    self.id_checkBox = QtWidgets.QCheckBox()
    self.id_checkBox.setText("id")
    self.id_checkBox.setChecked(True)
    self.id_checkBox.stateChanged.connect(self.id_checkBox_changed)
    self.class_checkBox = QtWidgets.QCheckBox()
    self.class_checkBox.setText("class")
    self.class_checkBox.setChecked(True)
    self.class_checkBox.stateChanged.connect(self.class_checkBox_changed)
    self.conf_checkBox = QtWidgets.QCheckBox()
    self.conf_checkBox.setText("confidence")
    self.conf_checkBox.setChecked(True)
    self.conf_checkBox.stateChanged.connect(self.conf_checkBox_changed)
    self.mask_checkBox = QtWidgets.QCheckBox()
    self.mask_checkBox.setText("mask")
    self.mask_checkBox.setChecked(True)
    self.mask_checkBox.stateChanged.connect(self.mask_checkBox_changed)
    self.traj_checkBox = QtWidgets.QCheckBox()
    self.traj_checkBox.setText("trajectories")
    self.traj_checkBox.setChecked(False)
    self.traj_checkBox.stateChanged.connect(self.traj_checkBox_changed)
    # line edit backing CURRENT_ANNOATAION_TRAJECTORIES['length']
    self.trajectory_length_lineEdit = QtWidgets.QLineEdit()
    self.trajectory_length_lineEdit.setText(str(30))
    self.trajectory_length_lineEdit.setMaximumWidth(50)
    self.trajectory_length_lineEdit.editingFinished.connect(
        self.trajectory_length_lineEdit_changed)
    self.polygons_visable_checkBox = QtWidgets.QCheckBox()
    self.polygons_visable_checkBox.setText("show polygons")
    self.polygons_visable_checkBox.setChecked(True)
    self.polygons_visable_checkBox.stateChanged.connect(
        self.polygons_visable_checkBox_changed)
    self.vis_options = [self.id_checkBox, self.class_checkBox, self.bbox_checkBox, self.mask_checkBox,
                        self.polygons_visable_checkBox, self.traj_checkBox, self.trajectory_length_lineEdit, self.conf_checkBox]
    # lay the options out in the visualization dock's 2-column grid
    self.vis_widget.setLayout(QtWidgets.QGridLayout())
    self.vis_widget.layout().setContentsMargins(10, 10, 25, 10)  # padding
    self.vis_widget.layout().addWidget(self.id_checkBox, 0, 0)
    self.vis_widget.layout().addWidget(self.class_checkBox, 0, 1)
    self.vis_widget.layout().addWidget(self.bbox_checkBox, 1, 0)
    self.vis_widget.layout().addWidget(self.mask_checkBox, 1, 1)
    self.vis_widget.layout().addWidget(self.traj_checkBox, 2, 0)
    self.vis_widget.layout().addWidget(self.trajectory_length_lineEdit, 2, 1)
    self.vis_widget.layout().addWidget(self.polygons_visable_checkBox, 3, 0)
    self.vis_widget.layout().addWidget(self.conf_checkBox, 3, 1)
    # disabled until a video is loaded
    for option in self.vis_options:
        option.setEnabled(False)
    # "Apply Changes": save the current frame's annotations
    self.update_current_frame_annotation_button = QtWidgets.QPushButton()
    self.update_current_frame_annotation_button.setStyleSheet(
        self.buttons_text_style_sheet)
    self.update_current_frame_annotation_button.setText(
        "Apply Changes")
    self.update_current_frame_annotation_button.setIcon(
        QtGui.QIcon("labelme/icons/done.png"))
    self.update_current_frame_annotation_button.setIconSize(
        QtCore.QSize(24, 24))
    self.update_current_frame_annotation_button.setShortcut(
        self._config['shortcuts']['update_frame'])
    self.update_current_frame_annotation_button.setToolTip(
        f'Apply changes on current frame ({self._config["shortcuts"]["update_frame"]})')
    self.update_current_frame_annotation_button.clicked.connect(
        self.update_current_frame_annotation_button_clicked)
    self.videoControls_2.addWidget(
        self.update_current_frame_annotation_button)
    # "Clear All": remove annotations from every frame
    self.clear_video_annotations_button = QtWidgets.QPushButton()
    self.clear_video_annotations_button.setStyleSheet(
        self.buttons_text_style_sheet)
    self.clear_video_annotations_button.setText("Clear All")
    self.clear_video_annotations_button.setIcon(
        QtGui.QIcon("labelme/icons/clear.png"))
    self.clear_video_annotations_button.setIconSize(QtCore.QSize(24, 24))
    self.clear_video_annotations_button.setShortcut(
        self._config['shortcuts']['clear_annotations'])
    self.clear_video_annotations_button.setToolTip(
        f'Clears Annotations from all frames ({self._config["shortcuts"]["clear_annotations"]})')
    self.clear_video_annotations_button.clicked.connect(
        self.clear_video_annotations_button_clicked)
    self.videoControls_2.addWidget(self.clear_video_annotations_button)
    # everything stays hidden until a video is opened
    self.set_video_controls_visibility(False)
def draw_bb_on_image(self, image, shapes, image_qt_flag=True):
    """Render the annotation overlays onto *image*.

    Thin delegation to the shared visualization helper, forwarding the
    window's current trajectory config, frame index, flag set and
    total frame count alongside the image and shapes.
    """
    return visualizations.draw_bb_on_image(
        self.CURRENT_ANNOATAION_TRAJECTORIES,
        self.INDEX_OF_CURRENT_FRAME,
        self.CURRENT_ANNOATAION_FLAGS,
        self.TOTAL_VIDEO_FRAMES,
        image,
        shapes,
        image_qt_flag,
    )
def waitWindow(self, visible=False, text=None):
    """Show or hide the canvas busy overlay and repaint immediately.

    With visible=True the overlay appears, optionally with *text* as
    its message; with visible=False it is hidden and the message resets
    to the default. processEvents() forces the update onto the screen
    even while the caller is still busy.
    """
    canvas = self.canvas
    if not visible:
        canvas.is_loading = False
        canvas.loading_text = "Loading..."
    else:
        canvas.is_loading = True
        if text is not None:
            canvas.loading_text = text
    canvas.repaint()
    QtWidgets.QApplication.processEvents()
def set_sam_toolbar_enable(self, enable=False):
    """Enable or disable the SAM toolbar's widgets.

    The enhance-annotation button and the model combo box are always
    kept enabled regardless of *enable*.
    """
    # widgets that must stay usable even while the toolbar is disabled
    always_on = ('sam_enhance_annotation_button', 'sam_model_comboBox')
    for widget in self.sam_toolbar.children():
        try:
            widget.setEnabled(enable or widget.accessibleName() in always_on)
        except AttributeError:
            # non-widget children (layouts, actions) lack these methods
            pass
def set_sam_toolbar_visibility(self, visible=False):
if not visible:
try:
self.sam_clear_annotation_button_clicked()
self.sam_buttons_colors("X")
gitextract_auoxlo5f/ ├── .github/ │ └── workflows/ │ └── retype-action.yml ├── .gitignore ├── DLTA_AI_app/ │ ├── .flake8 │ ├── .gitignore │ ├── .gitmodules │ ├── __main__.py │ ├── __main__.spec │ ├── inferencing.py │ ├── labelme/ │ │ ├── __init__.py │ │ ├── app.py │ │ ├── cli/ │ │ │ ├── __init__.py │ │ │ ├── draw_json.py │ │ │ ├── draw_label_png.py │ │ │ ├── json_to_dataset.py │ │ │ └── on_docker.py │ │ ├── config/ │ │ │ ├── __init__.py │ │ │ ├── default_config.yaml │ │ │ └── default_config_base.yaml │ │ ├── intelligence.py │ │ ├── label_file.py │ │ ├── logger.py │ │ ├── shape.py │ │ ├── testing.py │ │ ├── utils/ │ │ │ ├── __init__.py │ │ │ ├── _io.py │ │ │ ├── custom_exports.py │ │ │ ├── export.py │ │ │ ├── helpers/ │ │ │ │ ├── mathOps.py │ │ │ │ └── visualizations.py │ │ │ ├── image.py │ │ │ ├── model_explorer.py │ │ │ ├── qt.py │ │ │ ├── sam.py │ │ │ ├── shape.py │ │ │ └── vid_to_frames.py │ │ └── widgets/ │ │ ├── ClassesWidget.py │ │ ├── MsgBox.py │ │ ├── ThresholdWidget.py │ │ ├── __init__.py │ │ ├── brightness_contrast_dialog.py │ │ ├── canvas.py │ │ ├── check_updates_UI.py │ │ ├── color_dialog.py │ │ ├── deleteSelectedShape_UI.py │ │ ├── editLabel_videoMode.py │ │ ├── escapable_qlist_widget.py │ │ ├── exportData_UI.py │ │ ├── feedback_UI.py │ │ ├── getIDfromUser_UI.py │ │ ├── interpolation_UI.py │ │ ├── label_dialog.py │ │ ├── label_list_widget.py │ │ ├── links.py │ │ ├── merge_feature_UI.py │ │ ├── notification.py │ │ ├── open_file.py │ │ ├── preferences_UI.py │ │ ├── runtime_data_UI.py │ │ ├── scaleObject_UI.py │ │ ├── segmentation_options_UI.py │ │ ├── shortcut_selector_UI.py │ │ ├── tool_bar.py │ │ ├── unique_label_qlist_widget.py │ │ └── zoom_widget.py │ ├── mmdetection/ │ │ ├── .circleci/ │ │ │ └── config.yml │ │ ├── .dev_scripts/ │ │ │ ├── batch_test_list.py │ │ │ ├── batch_train_list.txt │ │ │ ├── benchmark_filter.py │ │ │ ├── benchmark_inference_fps.py │ │ │ ├── benchmark_test_image.py │ │ │ ├── check_links.py │ │ │ ├── 
convert_test_benchmark_script.py │ │ │ ├── convert_train_benchmark_script.py │ │ │ ├── gather_models.py │ │ │ ├── gather_test_benchmark_metric.py │ │ │ ├── gather_train_benchmark_metric.py │ │ │ ├── linter.sh │ │ │ ├── test_benchmark.sh │ │ │ ├── test_init_backbone.py │ │ │ └── train_benchmark.sh │ │ ├── .gitignore │ │ ├── .owners.yml │ │ ├── .pre-commit-config.yaml │ │ ├── .readthedocs.yml │ │ ├── CITATION.cff │ │ ├── LICENSE │ │ ├── MANIFEST.in │ │ ├── configs/ │ │ │ ├── _base_/ │ │ │ │ ├── datasets/ │ │ │ │ │ ├── cityscapes_detection.py │ │ │ │ │ ├── cityscapes_instance.py │ │ │ │ │ ├── coco_detection.py │ │ │ │ │ ├── coco_instance.py │ │ │ │ │ ├── coco_instance_semantic.py │ │ │ │ │ ├── coco_panoptic.py │ │ │ │ │ ├── deepfashion.py │ │ │ │ │ ├── lvis_v0.5_instance.py │ │ │ │ │ ├── lvis_v1_instance.py │ │ │ │ │ ├── openimages_detection.py │ │ │ │ │ ├── voc0712.py │ │ │ │ │ └── wider_face.py │ │ │ │ ├── default_runtime.py │ │ │ │ ├── models/ │ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn.py │ │ │ │ │ ├── cascade_rcnn_r50_fpn.py │ │ │ │ │ ├── fast_rcnn_r50_fpn.py │ │ │ │ │ ├── faster_rcnn_r50_caffe_c4.py │ │ │ │ │ ├── faster_rcnn_r50_caffe_dc5.py │ │ │ │ │ ├── faster_rcnn_r50_fpn.py │ │ │ │ │ ├── mask_rcnn_r50_caffe_c4.py │ │ │ │ │ ├── mask_rcnn_r50_fpn.py │ │ │ │ │ ├── retinanet_r50_fpn.py │ │ │ │ │ ├── rpn_r50_caffe_c4.py │ │ │ │ │ ├── rpn_r50_fpn.py │ │ │ │ │ └── ssd300.py │ │ │ │ └── schedules/ │ │ │ │ ├── schedule_1x.py │ │ │ │ ├── schedule_20e.py │ │ │ │ └── schedule_2x.py │ │ │ ├── albu_example/ │ │ │ │ └── mask_rcnn_r50_fpn_albu_1x_coco.py │ │ │ ├── atss/ │ │ │ │ ├── atss_r101_fpn_1x_coco.py │ │ │ │ ├── atss_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── autoassign/ │ │ │ │ ├── autoassign_r50_fpn_8x2_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── carafe/ │ │ │ │ ├── faster_rcnn_r50_fpn_carafe_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_carafe_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cascade_rcnn/ │ │ │ │ ├── cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ 
│ │ ├── cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r101_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_x101_32x4d_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── cascade_rcnn_x101_64x4d_fpn_20e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cascade_rpn/ │ │ │ │ ├── crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── crpn_r50_caffe_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── centernet/ │ │ │ │ ├── centernet_resnet18_140e_coco.py │ │ │ │ ├── centernet_resnet18_dcnv2_140e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── centripetalnet/ │ │ │ │ ├── centripetalnet_hourglass104_mstest_16x6_210e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cityscapes/ │ │ │ │ ├── faster_rcnn_r50_fpn_1x_cityscapes.py │ │ │ │ └── mask_rcnn_r50_fpn_1x_cityscapes.py │ │ │ 
├── common/ │ │ │ │ ├── lsj_100e_coco_instance.py │ │ │ │ ├── mstrain-poly_3x_coco_instance.py │ │ │ │ ├── mstrain_3x_coco.py │ │ │ │ ├── mstrain_3x_coco_instance.py │ │ │ │ ├── ssj_270k_coco_instance.py │ │ │ │ └── ssj_scp_270k_coco_instance.py │ │ │ ├── convnext/ │ │ │ │ ├── cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py │ │ │ │ ├── mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── cornernet/ │ │ │ │ ├── cornernet_hourglass104_mstest_10x5_210e_coco.py │ │ │ │ ├── cornernet_hourglass104_mstest_32x3_210e_coco.py │ │ │ │ ├── cornernet_hourglass104_mstest_8x6_210e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dcn/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_dpool_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dcnv2/ │ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mdpool_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ddod/ │ │ │ │ ├── ddod_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── deepfashion/ │ │ │ │ └── mask_rcnn_r50_fpn_15e_deepfashion.py │ │ │ ├── deformable_detr/ │ │ │ │ 
├── deformable_detr_r50_16x2_50e_coco.py │ │ │ │ ├── deformable_detr_refine_r50_16x2_50e_coco.py │ │ │ │ ├── deformable_detr_twostage_refine_r50_16x2_50e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── detectors/ │ │ │ │ ├── cascade_rcnn_r50_rfp_1x_coco.py │ │ │ │ ├── cascade_rcnn_r50_sac_1x_coco.py │ │ │ │ ├── detectors_cascade_rcnn_r50_1x_coco.py │ │ │ │ ├── detectors_htc_r101_20e_coco.py │ │ │ │ ├── detectors_htc_r50_1x_coco.py │ │ │ │ ├── htc_r50_rfp_1x_coco.py │ │ │ │ ├── htc_r50_sac_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── detr/ │ │ │ │ ├── detr_r50_8x2_150e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── double_heads/ │ │ │ │ ├── dh_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dyhead/ │ │ │ │ ├── atss_r50_caffe_fpn_dyhead_1x_coco.py │ │ │ │ ├── atss_r50_fpn_dyhead_1x_coco.py │ │ │ │ ├── atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── dynamic_rcnn/ │ │ │ │ ├── dynamic_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── efficientnet/ │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_effb3_fpn_crop896_8x4_1x_coco.py │ │ │ ├── empirical_attention/ │ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fast_rcnn/ │ │ │ │ ├── fast_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── fast_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── fast_rcnn_r50_fpn_1x_coco.py │ │ │ │ └── fast_rcnn_r50_fpn_2x_coco.py │ │ │ ├── faster_rcnn/ │ │ │ │ ├── faster_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_c4_1x_coco.py │ │ │ │ 
├── faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_90k_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_bounded_iou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_ciou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_giou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_iou_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_ohem_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_soft_nms_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fcos/ │ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_center-normbbox-centeronreg-giou_r50_caffe_fpn_gn-head_dcn_1x_coco.py │ │ │ │ ├── fcos_center_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r101_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ ├── 
fcos_r50_caffe_fpn_gn-head_1x_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py │ │ │ │ ├── fcos_r50_caffe_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ ├── fcos_x101_64x4d_fpn_gn-head_mstrain_640-800_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── foveabox/ │ │ │ │ ├── fovea_align_r101_fpn_gn-head_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r101_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r50_fpn_gn-head_4x4_2x_coco.py │ │ │ │ ├── fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fovea_r101_fpn_4x4_1x_coco.py │ │ │ │ ├── fovea_r101_fpn_4x4_2x_coco.py │ │ │ │ ├── fovea_r50_fpn_4x4_1x_coco.py │ │ │ │ ├── fovea_r50_fpn_4x4_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── fpg/ │ │ │ │ ├── faster_rcnn_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ ├── faster_rcnn_r50_fpg_crop640_50e_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpg_crop640_50e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_crop640_50e_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpg-chn128_crop640_50e_coco.py │ │ │ │ └── retinanet_r50_fpg_crop640_50e_coco.py │ │ │ ├── free_anchor/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_free_anchor_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_free_anchor_r50_fpn_1x_coco.py │ │ │ │ └── retinanet_free_anchor_x101_32x4d_fpn_1x_coco.py │ │ │ ├── fsaf/ │ │ │ │ ├── fsaf_r101_fpn_1x_coco.py │ │ │ │ ├── fsaf_r50_fpn_1x_coco.py │ │ │ │ ├── fsaf_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gcnet/ │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_dconv_c3-c5_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── 
cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r16_gcb_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_syncbn-backbone_r4_gcb_c3-c5_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gfl/ │ │ │ │ ├── gfl_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── gfl_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── gfl_r50_fpn_1x_coco.py │ │ │ │ ├── gfl_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── gfl_x101_32x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ │ │ ├── gfl_x101_32x4d_fpn_mstrain_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ghm/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_ghm_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_ghm_r50_fpn_1x_coco.py │ │ │ │ ├── retinanet_ghm_x101_32x4d_fpn_1x_coco.py │ │ │ │ └── retinanet_ghm_x101_64x4d_fpn_1x_coco.py │ │ │ ├── gn/ │ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn-all_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_contrib_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── gn+ws/ │ │ │ │ ├── 
faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_x101_32x4d_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── faster_rcnn_x50_32x4d_fpn_gn_ws-all_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_gn_ws-all_2x_coco.py │ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_20_23_24e_coco.py │ │ │ │ ├── mask_rcnn_x50_32x4d_fpn_gn_ws-all_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── grid_rcnn/ │ │ │ │ ├── grid_rcnn_r101_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_1x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_x101_32x4d_fpn_gn-head_2x_coco.py │ │ │ │ ├── grid_rcnn_x101_64x4d_fpn_gn-head_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── groie/ │ │ │ │ ├── faster_rcnn_r50_fpn_groie_1x_coco.py │ │ │ │ ├── grid_rcnn_r50_fpn_gn-head_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_groie_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-backbone_r4_gcb_c3-c5_groie_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── guided_anchoring/ │ │ │ │ ├── ga_fast_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_faster_r50_fpn_1x_coco.py │ │ │ │ ├── ga_faster_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_faster_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_r101_caffe_fpn_mstrain_2x.py │ │ │ │ ├── ga_retinanet_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_retinanet_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── 
ga_rpn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_r50_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ga_rpn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── hrnet/ │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── cascade_mask_rcnn_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── cascade_rcnn_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w18_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w18_2x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w32_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w32_2x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w40_1x_coco.py │ │ │ │ ├── faster_rcnn_hrnetv2p_w40_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w18_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_1x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── fcos_hrnetv2p_w40_gn-head_mstrain_640-800_4x4_2x_coco.py │ │ │ │ ├── htc_hrnetv2p_w18_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w32_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w40_20e_coco.py │ │ │ │ ├── htc_hrnetv2p_w40_28e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_16x1_28e_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w18_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w18_2x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w32_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w32_2x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w40_1x_coco.py │ │ │ │ ├── mask_rcnn_hrnetv2p_w40_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── htc/ │ │ │ │ ├── htc_r101_fpn_20e_coco.py │ │ │ │ ├── htc_r50_fpn_1x_coco.py │ │ │ │ ├── htc_r50_fpn_20e_coco.py │ │ │ │ ├── htc_without_semantic_r50_fpn_1x_coco.py │ │ │ │ ├── 
htc_x101_32x4d_fpn_16x1_20e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_16x1_20e_coco.py │ │ │ │ ├── htc_x101_64x4d_fpn_dconv_c3-c5_mstrain_400_1400_16x1_20e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── instaboost/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ │ │ ├── cascade_mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_instaboost_4x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_instaboost_4x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── lad/ │ │ │ │ ├── lad_r101_paa_r50_fpn_coco_1x.py │ │ │ │ ├── lad_r50_paa_r101_fpn_coco_1x.py │ │ │ │ └── metafile.yml │ │ │ ├── ld/ │ │ │ │ ├── ld_r101_gflv1_r101dcn_fpn_coco_2x.py │ │ │ │ ├── ld_r18_gflv1_r101_fpn_coco_1x.py │ │ │ │ ├── ld_r34_gflv1_r101_fpn_coco_1x.py │ │ │ │ ├── ld_r50_gflv1_r101_fpn_coco_1x.py │ │ │ │ └── metafile.yml │ │ │ ├── legacy_1.x/ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco_v1.py │ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco_v1.py │ │ │ │ ├── retinanet_r50_fpn_1x_coco_v1.py │ │ │ │ └── ssd300_coco_v1.py │ │ │ ├── libra_rcnn/ │ │ │ │ ├── libra_fast_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── libra_retinanet_r50_fpn_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── lvis/ │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ │ ├── 
mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py │ │ │ │ └── mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py │ │ │ ├── mask2former/ │ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_r101_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_r50_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py │ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py │ │ │ │ ├── mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── mask_rcnn/ │ │ │ │ ├── mask_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_c4_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_1x_wandb_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_poly_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── 
mask_rcnn_x101_32x8d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ │ ├── mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── maskformer/ │ │ │ │ ├── maskformer_r50_mstrain_16x1_75e_coco.py │ │ │ │ ├── maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── ms_rcnn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── ms_rcnn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_r101_caffe_fpn_2x_coco.py │ │ │ │ ├── ms_rcnn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_r50_caffe_fpn_2x_coco.py │ │ │ │ ├── ms_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── ms_rcnn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── ms_rcnn_x101_64x4d_fpn_2x_coco.py │ │ │ ├── nas_fcos/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ │ └── nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py │ │ │ ├── nas_fpn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpn_crop640_50e_coco.py │ │ │ │ └── retinanet_r50_nasfpn_crop640_50e_coco.py │ │ │ ├── openimages/ │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py │ │ │ │ ├── faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r50_fpn_32x2_1x_openimages.py │ │ │ │ └── ssd300_32x8_36e_openimages.py │ │ │ ├── paa/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── paa_r101_fpn_1x_coco.py │ │ │ │ ├── paa_r101_fpn_2x_coco.py │ │ │ │ ├── paa_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── paa_r50_fpn_1.5x_coco.py │ │ │ │ ├── paa_r50_fpn_1x_coco.py │ │ │ │ ├── paa_r50_fpn_2x_coco.py │ │ │ │ └── paa_r50_fpn_mstrain_3x_coco.py │ │ │ ├── pafpn/ │ │ │ │ ├── faster_rcnn_r50_pafpn_1x_coco.py │ │ │ │ └── 
metafile.yml │ │ │ ├── panoptic_fpn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── panoptic_fpn_r101_fpn_1x_coco.py │ │ │ │ ├── panoptic_fpn_r101_fpn_mstrain_3x_coco.py │ │ │ │ ├── panoptic_fpn_r50_fpn_1x_coco.py │ │ │ │ └── panoptic_fpn_r50_fpn_mstrain_3x_coco.py │ │ │ ├── pascal_voc/ │ │ │ │ ├── faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712.py │ │ │ │ ├── faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py │ │ │ │ ├── retinanet_r50_fpn_1x_voc0712.py │ │ │ │ ├── ssd300_voc0712.py │ │ │ │ └── ssd512_voc0712.py │ │ │ ├── pisa/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── pisa_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_mask_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── pisa_retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── pisa_ssd300_coco.py │ │ │ │ └── pisa_ssd512_coco.py │ │ │ ├── point_rend/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── point_rend_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ └── point_rend_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ ├── pvt/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_pvt-l_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-m_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-s_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvt-t_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b0_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b1_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b2_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b3_fpn_1x_coco.py │ │ │ │ ├── retinanet_pvtv2-b4_fpn_1x_coco.py │ │ │ │ └── retinanet_pvtv2-b5_fpn_1x_coco.py │ │ │ ├── queryinst/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ ├── queryinst_r101_fpn_mstrain_480-800_3x_coco.py │ │ │ │ ├── queryinst_r50_fpn_1x_coco.py │ │ │ │ ├── queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ └── queryinst_r50_fpn_mstrain_480-800_3x_coco.py │ │ │ ├── regnet/ │ │ │ │ ├── 
cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-12GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py │ │ │ │ ├── mask_rcnn_regnetx-8GF_fpn_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_regnetx-1.6GF_fpn_1x_coco.py │ │ │ │ ├── retinanet_regnetx-3.2GF_fpn_1x_coco.py │ │ │ │ └── retinanet_regnetx-800MF_fpn_1x_coco.py │ │ │ ├── reppoints/ │ │ │ │ ├── bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── bbox_r50_grid_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_r50_fpn_1x_coco.py │ │ │ │ ├── 
reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py │ │ │ │ ├── reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py │ │ │ │ ├── reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py │ │ │ │ └── reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py │ │ │ ├── res2net/ │ │ │ │ ├── cascade_mask_rcnn_r2_101_fpn_20e_coco.py │ │ │ │ ├── cascade_rcnn_r2_101_fpn_20e_coco.py │ │ │ │ ├── faster_rcnn_r2_101_fpn_2x_coco.py │ │ │ │ ├── htc_r2_101_fpn_20e_coco.py │ │ │ │ ├── mask_rcnn_r2_101_fpn_2x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── resnest/ │ │ │ │ ├── cascade_mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── cascade_mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── cascade_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── cascade_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── faster_rcnn_s101_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── faster_rcnn_s50_fpn_syncbn-backbone+head_mstrain-range_1x_coco.py │ │ │ │ ├── mask_rcnn_s101_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ ├── mask_rcnn_s50_fpn_syncbn-backbone+head_mstrain_1x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── resnet_strikes_back/ │ │ │ │ ├── cascade_mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── faster_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_r50_fpn_rsb-pretrain_1x_coco.py │ │ │ ├── retinanet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── retinanet_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── retinanet_r101_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── retinanet_r101_fpn_1x_coco.py │ │ │ │ ├── retinanet_r101_fpn_2x_coco.py │ │ │ │ ├── retinanet_r101_fpn_mstrain_640-800_3x_coco.py │ │ │ │ ├── retinanet_r18_fpn_1x8_1x_coco.py │ │ │ │ ├── retinanet_r18_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_1x_coco.py │ │ │ │ ├── 
retinanet_r50_caffe_fpn_mstrain_2x_coco.py │ │ │ │ ├── retinanet_r50_caffe_fpn_mstrain_3x_coco.py │ │ │ │ ├── retinanet_r50_fpn_1x_coco.py │ │ │ │ ├── retinanet_r50_fpn_2x_coco.py │ │ │ │ ├── retinanet_r50_fpn_90k_coco.py │ │ │ │ ├── retinanet_r50_fpn_fp16_1x_coco.py │ │ │ │ ├── retinanet_r50_fpn_mstrain_640-800_3x_coco.py │ │ │ │ ├── retinanet_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── retinanet_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── retinanet_x101_64x4d_fpn_1x_coco.py │ │ │ │ ├── retinanet_x101_64x4d_fpn_2x_coco.py │ │ │ │ └── retinanet_x101_64x4d_fpn_mstrain_640-800_3x_coco.py │ │ │ ├── rpn/ │ │ │ │ ├── rpn_r101_caffe_fpn_1x_coco.py │ │ │ │ ├── rpn_r101_fpn_1x_coco.py │ │ │ │ ├── rpn_r101_fpn_2x_coco.py │ │ │ │ ├── rpn_r50_caffe_c4_1x_coco.py │ │ │ │ ├── rpn_r50_caffe_fpn_1x_coco.py │ │ │ │ ├── rpn_r50_fpn_1x_coco.py │ │ │ │ ├── rpn_r50_fpn_2x_coco.py │ │ │ │ ├── rpn_x101_32x4d_fpn_1x_coco.py │ │ │ │ ├── rpn_x101_32x4d_fpn_2x_coco.py │ │ │ │ ├── rpn_x101_64x4d_fpn_1x_coco.py │ │ │ │ └── rpn_x101_64x4d_fpn_2x_coco.py │ │ │ ├── sabl/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── sabl_cascade_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_cascade_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sabl_faster_rcnn_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_faster_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_1x_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_480_960_coco.py │ │ │ │ ├── sabl_retinanet_r101_fpn_gn_2x_ms_640_800_coco.py │ │ │ │ ├── sabl_retinanet_r50_fpn_1x_coco.py │ │ │ │ └── sabl_retinanet_r50_fpn_gn_1x_coco.py │ │ │ ├── scnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── scnet_r101_fpn_20e_coco.py │ │ │ │ ├── scnet_r50_fpn_1x_coco.py │ │ │ │ ├── scnet_r50_fpn_20e_coco.py │ │ │ │ ├── scnet_x101_64x4d_fpn_20e_coco.py │ │ │ │ └── scnet_x101_64x4d_fpn_8x1_20e_coco.py │ │ │ ├── scratch/ │ │ │ │ ├── faster_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── 
seesaw_loss/ │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py │ │ │ │ ├── mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py │ │ │ │ └── metafile.yml │ │ │ ├── selfsup_pretrain/ │ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py │ │ │ │ └── mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py │ │ │ ├── simple_copy_paste/ │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py │ │ │ │ └── metafile.yml │ │ │ ├── solo/ │ │ │ │ ├── decoupled_solo_light_r50_fpn_3x_coco.py │ │ │ │ ├── decoupled_solo_r50_fpn_1x_coco.py │ │ │ │ ├── decoupled_solo_r50_fpn_3x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ ├── solo_r50_fpn_1x_coco.py │ │ │ │ └── solo_r50_fpn_3x_coco.py │ │ │ ├── solov2/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── solov2_light_r18_fpn_3x_coco.py │ │ │ │ ├── 
solov2_light_r34_fpn_3x_coco.py │ │ │ │ ├── solov2_light_r50_dcn_fpn_3x_coco.py │ │ │ │ ├── solov2_light_r50_fpn_3x_coco.py │ │ │ │ ├── solov2_r101_dcn_fpn_3x_coco.py │ │ │ │ ├── solov2_r101_fpn_3x_coco.py │ │ │ │ ├── solov2_r50_fpn_1x_coco.py │ │ │ │ ├── solov2_r50_fpn_3x_coco.py │ │ │ │ └── solov2_x101_dcn_fpn_3x_coco.py │ │ │ ├── sparse_rcnn/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ ├── sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py │ │ │ │ ├── sparse_rcnn_r50_fpn_1x_coco.py │ │ │ │ ├── sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py │ │ │ │ └── sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py │ │ │ ├── ssd/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── ssd300_coco.py │ │ │ │ ├── ssd300_fp16_coco.py │ │ │ │ ├── ssd512_coco.py │ │ │ │ ├── ssd512_fp16_coco.py │ │ │ │ └── ssdlite_mobilenetv2_scratch_600e_coco.py │ │ │ ├── strong_baselines/ │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py │ │ │ │ ├── mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py │ │ │ │ ├── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py │ │ │ │ └── mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py │ │ │ ├── swin/ │ │ │ │ ├── mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py │ │ │ │ ├── mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py │ │ │ │ ├── metafile.yml │ │ │ │ └── retinanet_swin-t-p4-w7_fpn_1x_coco.py │ │ │ ├── timm_example/ │ │ │ │ ├── retinanet_timm_efficientnet_b1_fpn_1x_coco.py │ │ │ │ └── retinanet_timm_tv_resnet50_fpn_1x_coco.py │ │ │ ├── tood/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── tood_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── 
tood_r50_fpn_1x_coco.py │ │ │ │ ├── tood_r50_fpn_anchor_based_1x_coco.py │ │ │ │ ├── tood_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py │ │ │ │ └── tood_x101_64x4d_fpn_mstrain_2x_coco.py │ │ │ ├── tridentnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── tridentnet_r50_caffe_1x_coco.py │ │ │ │ ├── tridentnet_r50_caffe_mstrain_1x_coco.py │ │ │ │ └── tridentnet_r50_caffe_mstrain_3x_coco.py │ │ │ ├── vfnet/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── vfnet_r101_fpn_1x_coco.py │ │ │ │ ├── vfnet_r101_fpn_2x_coco.py │ │ │ │ ├── vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r101_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r2_101_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r50_fpn_1x_coco.py │ │ │ │ ├── vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_r50_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_32x4d_fpn_mstrain_2x_coco.py │ │ │ │ ├── vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py │ │ │ │ └── vfnet_x101_64x4d_fpn_mstrain_2x_coco.py │ │ │ ├── wider_face/ │ │ │ │ └── ssd300_wider_face.py │ │ │ ├── yolact/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolact_r101_1x8_coco.py │ │ │ │ ├── yolact_r50_1x8_coco.py │ │ │ │ └── yolact_r50_8x8_coco.py │ │ │ ├── yolo/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolov3_d53_320_273e_coco.py │ │ │ │ ├── yolov3_d53_fp16_mstrain-608_273e_coco.py │ │ │ │ ├── yolov3_d53_mstrain-416_273e_coco.py │ │ │ │ ├── yolov3_d53_mstrain-608_273e_coco.py │ │ │ │ ├── yolov3_mobilenetv2_320_300e_coco.py │ │ │ │ └── yolov3_mobilenetv2_mstrain-416_300e_coco.py │ │ │ ├── yolof/ │ │ │ │ ├── metafile.yml │ │ │ │ ├── yolof_r50_c5_8x8_1x_coco.py │ │ │ │ └── yolof_r50_c5_8x8_iter-1x_coco.py │ │ │ └── yolox/ │ │ │ ├── metafile.yml │ │ │ ├── yolox_l_8x8_300e_coco.py │ │ │ ├── yolox_m_8x8_300e_coco.py │ │ │ ├── yolox_nano_8x8_300e_coco.py │ │ │ ├── yolox_s_8x8_300e_coco.py │ │ │ ├── 
yolox_tiny_8x8_300e_coco.py │ │ │ └── yolox_x_8x8_300e_coco.py │ │ ├── docs/ │ │ │ ├── en/ │ │ │ │ ├── Makefile │ │ │ │ ├── _static/ │ │ │ │ │ └── css/ │ │ │ │ │ └── readthedocs.css │ │ │ │ ├── api.rst │ │ │ │ ├── conf.py │ │ │ │ ├── index.rst │ │ │ │ ├── make.bat │ │ │ │ ├── stat.py │ │ │ │ └── tutorials/ │ │ │ │ └── index.rst │ │ │ └── zh_cn/ │ │ │ ├── Makefile │ │ │ ├── _static/ │ │ │ │ └── css/ │ │ │ │ └── readthedocs.css │ │ │ ├── api.rst │ │ │ ├── conf.py │ │ │ ├── index.rst │ │ │ ├── make.bat │ │ │ ├── stat.py │ │ │ └── tutorials/ │ │ │ └── index.rst │ │ ├── mmdet/ │ │ │ ├── __init__.py │ │ │ ├── apis/ │ │ │ │ ├── __init__.py │ │ │ │ ├── inference.py │ │ │ │ ├── test.py │ │ │ │ └── train.py │ │ │ ├── core/ │ │ │ │ ├── __init__.py │ │ │ │ ├── anchor/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_generator.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── point_generator.py │ │ │ │ │ └── utils.py │ │ │ │ ├── bbox/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── assigners/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── approx_max_iou_assigner.py │ │ │ │ │ │ ├── assign_result.py │ │ │ │ │ │ ├── atss_assigner.py │ │ │ │ │ │ ├── base_assigner.py │ │ │ │ │ │ ├── center_region_assigner.py │ │ │ │ │ │ ├── grid_assigner.py │ │ │ │ │ │ ├── hungarian_assigner.py │ │ │ │ │ │ ├── mask_hungarian_assigner.py │ │ │ │ │ │ ├── max_iou_assigner.py │ │ │ │ │ │ ├── point_assigner.py │ │ │ │ │ │ ├── region_assigner.py │ │ │ │ │ │ ├── sim_ota_assigner.py │ │ │ │ │ │ ├── task_aligned_assigner.py │ │ │ │ │ │ └── uniform_assigner.py │ │ │ │ │ ├── builder.py │ │ │ │ │ ├── coder/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_bbox_coder.py │ │ │ │ │ │ ├── bucketing_bbox_coder.py │ │ │ │ │ │ ├── delta_xywh_bbox_coder.py │ │ │ │ │ │ ├── distance_point_bbox_coder.py │ │ │ │ │ │ ├── legacy_delta_xywh_bbox_coder.py │ │ │ │ │ │ ├── pseudo_bbox_coder.py │ │ │ │ │ │ ├── tblr_bbox_coder.py │ │ │ │ │ │ └── yolo_bbox_coder.py │ │ │ │ │ ├── demodata.py │ │ │ │ │ ├── iou_calculators/ │ │ │ │ │ │ ├── __init__.py │ │ 
│ │ │ │ ├── builder.py │ │ │ │ │ │ └── iou2d_calculator.py │ │ │ │ │ ├── match_costs/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── builder.py │ │ │ │ │ │ └── match_cost.py │ │ │ │ │ ├── samplers/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_sampler.py │ │ │ │ │ │ ├── combined_sampler.py │ │ │ │ │ │ ├── instance_balanced_pos_sampler.py │ │ │ │ │ │ ├── iou_balanced_neg_sampler.py │ │ │ │ │ │ ├── mask_pseudo_sampler.py │ │ │ │ │ │ ├── mask_sampling_result.py │ │ │ │ │ │ ├── ohem_sampler.py │ │ │ │ │ │ ├── pseudo_sampler.py │ │ │ │ │ │ ├── random_sampler.py │ │ │ │ │ │ ├── sampling_result.py │ │ │ │ │ │ └── score_hlr_sampler.py │ │ │ │ │ └── transforms.py │ │ │ │ ├── data_structures/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── general_data.py │ │ │ │ │ └── instance_data.py │ │ │ │ ├── evaluation/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox_overlaps.py │ │ │ │ │ ├── class_names.py │ │ │ │ │ ├── eval_hooks.py │ │ │ │ │ ├── mean_ap.py │ │ │ │ │ ├── panoptic_utils.py │ │ │ │ │ └── recall.py │ │ │ │ ├── export/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── model_wrappers.py │ │ │ │ │ ├── onnx_helper.py │ │ │ │ │ └── pytorch2onnx.py │ │ │ │ ├── hook/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── checkloss_hook.py │ │ │ │ │ ├── ema.py │ │ │ │ │ ├── memory_profiler_hook.py │ │ │ │ │ ├── set_epoch_info_hook.py │ │ │ │ │ ├── sync_norm_hook.py │ │ │ │ │ ├── sync_random_size_hook.py │ │ │ │ │ ├── wandblogger_hook.py │ │ │ │ │ ├── yolox_lrupdater_hook.py │ │ │ │ │ └── yolox_mode_switch_hook.py │ │ │ │ ├── mask/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── mask_target.py │ │ │ │ │ ├── structures.py │ │ │ │ │ └── utils.py │ │ │ │ ├── optimizers/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── builder.py │ │ │ │ │ └── layer_decay_optimizer_constructor.py │ │ │ │ ├── post_processing/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bbox_nms.py │ │ │ │ │ ├── matrix_nms.py │ │ │ │ │ └── merge_augs.py │ │ │ │ ├── utils/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dist_utils.py │ │ │ │ │ └── misc.py │ │ │ │ └── visualization/ 
│ │ │ │ ├── __init__.py │ │ │ │ ├── image.py │ │ │ │ └── palette.py │ │ │ ├── models/ │ │ │ │ ├── __init__.py │ │ │ │ ├── backbones/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── csp_darknet.py │ │ │ │ │ ├── darknet.py │ │ │ │ │ ├── detectors_resnet.py │ │ │ │ │ ├── detectors_resnext.py │ │ │ │ │ ├── efficientnet.py │ │ │ │ │ ├── hourglass.py │ │ │ │ │ ├── hrnet.py │ │ │ │ │ ├── mobilenet_v2.py │ │ │ │ │ ├── pvt.py │ │ │ │ │ ├── regnet.py │ │ │ │ │ ├── res2net.py │ │ │ │ │ ├── resnest.py │ │ │ │ │ ├── resnet.py │ │ │ │ │ ├── resnext.py │ │ │ │ │ ├── ssd_vgg.py │ │ │ │ │ ├── swin.py │ │ │ │ │ └── trident_resnet.py │ │ │ │ ├── builder.py │ │ │ │ ├── dense_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── anchor_free_head.py │ │ │ │ │ ├── anchor_head.py │ │ │ │ │ ├── atss_head.py │ │ │ │ │ ├── autoassign_head.py │ │ │ │ │ ├── base_dense_head.py │ │ │ │ │ ├── base_mask_head.py │ │ │ │ │ ├── cascade_rpn_head.py │ │ │ │ │ ├── centernet_head.py │ │ │ │ │ ├── centripetal_head.py │ │ │ │ │ ├── corner_head.py │ │ │ │ │ ├── ddod_head.py │ │ │ │ │ ├── deformable_detr_head.py │ │ │ │ │ ├── dense_test_mixins.py │ │ │ │ │ ├── detr_head.py │ │ │ │ │ ├── embedding_rpn_head.py │ │ │ │ │ ├── fcos_head.py │ │ │ │ │ ├── fovea_head.py │ │ │ │ │ ├── free_anchor_retina_head.py │ │ │ │ │ ├── fsaf_head.py │ │ │ │ │ ├── ga_retina_head.py │ │ │ │ │ ├── ga_rpn_head.py │ │ │ │ │ ├── gfl_head.py │ │ │ │ │ ├── guided_anchor_head.py │ │ │ │ │ ├── lad_head.py │ │ │ │ │ ├── ld_head.py │ │ │ │ │ ├── mask2former_head.py │ │ │ │ │ ├── maskformer_head.py │ │ │ │ │ ├── nasfcos_head.py │ │ │ │ │ ├── paa_head.py │ │ │ │ │ ├── pisa_retinanet_head.py │ │ │ │ │ ├── pisa_ssd_head.py │ │ │ │ │ ├── reppoints_head.py │ │ │ │ │ ├── retina_head.py │ │ │ │ │ ├── retina_sepbn_head.py │ │ │ │ │ ├── rpn_head.py │ │ │ │ │ ├── sabl_retina_head.py │ │ │ │ │ ├── solo_head.py │ │ │ │ │ ├── solov2_head.py │ │ │ │ │ ├── ssd_head.py │ │ │ │ │ ├── tood_head.py │ │ │ │ │ ├── vfnet_head.py │ │ │ │ │ ├── yolact_head.py │ │ │ │ │ ├── 
yolo_head.py │ │ │ │ │ ├── yolof_head.py │ │ │ │ │ └── yolox_head.py │ │ │ │ ├── detectors/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── atss.py │ │ │ │ │ ├── autoassign.py │ │ │ │ │ ├── base.py │ │ │ │ │ ├── cascade_rcnn.py │ │ │ │ │ ├── centernet.py │ │ │ │ │ ├── cornernet.py │ │ │ │ │ ├── ddod.py │ │ │ │ │ ├── deformable_detr.py │ │ │ │ │ ├── detr.py │ │ │ │ │ ├── fast_rcnn.py │ │ │ │ │ ├── faster_rcnn.py │ │ │ │ │ ├── fcos.py │ │ │ │ │ ├── fovea.py │ │ │ │ │ ├── fsaf.py │ │ │ │ │ ├── gfl.py │ │ │ │ │ ├── grid_rcnn.py │ │ │ │ │ ├── htc.py │ │ │ │ │ ├── kd_one_stage.py │ │ │ │ │ ├── lad.py │ │ │ │ │ ├── mask2former.py │ │ │ │ │ ├── mask_rcnn.py │ │ │ │ │ ├── mask_scoring_rcnn.py │ │ │ │ │ ├── maskformer.py │ │ │ │ │ ├── nasfcos.py │ │ │ │ │ ├── paa.py │ │ │ │ │ ├── panoptic_fpn.py │ │ │ │ │ ├── panoptic_two_stage_segmentor.py │ │ │ │ │ ├── point_rend.py │ │ │ │ │ ├── queryinst.py │ │ │ │ │ ├── reppoints_detector.py │ │ │ │ │ ├── retinanet.py │ │ │ │ │ ├── rpn.py │ │ │ │ │ ├── scnet.py │ │ │ │ │ ├── single_stage.py │ │ │ │ │ ├── single_stage_instance_seg.py │ │ │ │ │ ├── solo.py │ │ │ │ │ ├── solov2.py │ │ │ │ │ ├── sparse_rcnn.py │ │ │ │ │ ├── tood.py │ │ │ │ │ ├── trident_faster_rcnn.py │ │ │ │ │ ├── two_stage.py │ │ │ │ │ ├── vfnet.py │ │ │ │ │ ├── yolact.py │ │ │ │ │ ├── yolo.py │ │ │ │ │ ├── yolof.py │ │ │ │ │ └── yolox.py │ │ │ │ ├── losses/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── accuracy.py │ │ │ │ │ ├── ae_loss.py │ │ │ │ │ ├── balanced_l1_loss.py │ │ │ │ │ ├── cross_entropy_loss.py │ │ │ │ │ ├── dice_loss.py │ │ │ │ │ ├── focal_loss.py │ │ │ │ │ ├── gaussian_focal_loss.py │ │ │ │ │ ├── gfocal_loss.py │ │ │ │ │ ├── ghm_loss.py │ │ │ │ │ ├── iou_loss.py │ │ │ │ │ ├── kd_loss.py │ │ │ │ │ ├── mse_loss.py │ │ │ │ │ ├── pisa_loss.py │ │ │ │ │ ├── seesaw_loss.py │ │ │ │ │ ├── smooth_l1_loss.py │ │ │ │ │ ├── utils.py │ │ │ │ │ └── varifocal_loss.py │ │ │ │ ├── necks/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── bfp.py │ │ │ │ │ ├── channel_mapper.py │ │ │ │ │ ├── 
ct_resnet_neck.py │ │ │ │ │ ├── dilated_encoder.py │ │ │ │ │ ├── dyhead.py │ │ │ │ │ ├── fpg.py │ │ │ │ │ ├── fpn.py │ │ │ │ │ ├── fpn_carafe.py │ │ │ │ │ ├── hrfpn.py │ │ │ │ │ ├── nas_fpn.py │ │ │ │ │ ├── nasfcos_fpn.py │ │ │ │ │ ├── pafpn.py │ │ │ │ │ ├── rfp.py │ │ │ │ │ ├── ssd_neck.py │ │ │ │ │ ├── yolo_neck.py │ │ │ │ │ └── yolox_pafpn.py │ │ │ │ ├── plugins/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── dropblock.py │ │ │ │ │ ├── msdeformattn_pixel_decoder.py │ │ │ │ │ └── pixel_decoder.py │ │ │ │ ├── roi_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_roi_head.py │ │ │ │ │ ├── bbox_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── bbox_head.py │ │ │ │ │ │ ├── convfc_bbox_head.py │ │ │ │ │ │ ├── dii_head.py │ │ │ │ │ │ ├── double_bbox_head.py │ │ │ │ │ │ ├── sabl_head.py │ │ │ │ │ │ └── scnet_bbox_head.py │ │ │ │ │ ├── cascade_roi_head.py │ │ │ │ │ ├── double_roi_head.py │ │ │ │ │ ├── dynamic_roi_head.py │ │ │ │ │ ├── grid_roi_head.py │ │ │ │ │ ├── htc_roi_head.py │ │ │ │ │ ├── mask_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── coarse_mask_head.py │ │ │ │ │ │ ├── dynamic_mask_head.py │ │ │ │ │ │ ├── fcn_mask_head.py │ │ │ │ │ │ ├── feature_relay_head.py │ │ │ │ │ │ ├── fused_semantic_head.py │ │ │ │ │ │ ├── global_context_head.py │ │ │ │ │ │ ├── grid_head.py │ │ │ │ │ │ ├── htc_mask_head.py │ │ │ │ │ │ ├── mask_point_head.py │ │ │ │ │ │ ├── maskiou_head.py │ │ │ │ │ │ ├── scnet_mask_head.py │ │ │ │ │ │ └── scnet_semantic_head.py │ │ │ │ │ ├── mask_scoring_roi_head.py │ │ │ │ │ ├── pisa_roi_head.py │ │ │ │ │ ├── point_rend_roi_head.py │ │ │ │ │ ├── roi_extractors/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ ├── base_roi_extractor.py │ │ │ │ │ │ ├── generic_roi_extractor.py │ │ │ │ │ │ └── single_level_roi_extractor.py │ │ │ │ │ ├── scnet_roi_head.py │ │ │ │ │ ├── shared_heads/ │ │ │ │ │ │ ├── __init__.py │ │ │ │ │ │ └── res_layer.py │ │ │ │ │ ├── sparse_roi_head.py │ │ │ │ │ ├── standard_roi_head.py │ │ │ │ │ ├── test_mixins.py │ │ │ │ │ └── 
trident_roi_head.py │ │ │ │ ├── seg_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_semantic_head.py │ │ │ │ │ ├── panoptic_fpn_head.py │ │ │ │ │ └── panoptic_fusion_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── base_panoptic_fusion_head.py │ │ │ │ │ ├── heuristic_fusion_head.py │ │ │ │ │ └── maskformer_fusion_head.py │ │ │ │ └── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── brick_wrappers.py │ │ │ │ ├── builder.py │ │ │ │ ├── ckpt_convert.py │ │ │ │ ├── conv_upsample.py │ │ │ │ ├── csp_layer.py │ │ │ │ ├── gaussian_target.py │ │ │ │ ├── inverted_residual.py │ │ │ │ ├── make_divisible.py │ │ │ │ ├── misc.py │ │ │ │ ├── normed_predictor.py │ │ │ │ ├── panoptic_gt_processing.py │ │ │ │ ├── point_sample.py │ │ │ │ ├── positional_encoding.py │ │ │ │ ├── res_layer.py │ │ │ │ ├── se_layer.py │ │ │ │ └── transformer.py │ │ │ ├── utils/ │ │ │ │ ├── __init__.py │ │ │ │ ├── collect_env.py │ │ │ │ ├── compat_config.py │ │ │ │ ├── contextmanagers.py │ │ │ │ ├── logger.py │ │ │ │ ├── memory.py │ │ │ │ ├── misc.py │ │ │ │ ├── profiling.py │ │ │ │ ├── replace_cfg_vals.py │ │ │ │ ├── setup_env.py │ │ │ │ ├── split_batch.py │ │ │ │ ├── util_distribution.py │ │ │ │ ├── util_mixins.py │ │ │ │ └── util_random.py │ │ │ └── version.py │ │ ├── model-index.yml │ │ ├── pytest.ini │ │ ├── requirements/ │ │ │ ├── albu.txt │ │ │ ├── build.txt │ │ │ ├── docs.txt │ │ │ ├── mminstall.txt │ │ │ ├── optional.txt │ │ │ ├── readthedocs.txt │ │ │ ├── runtime.txt │ │ │ └── tests.txt │ │ ├── requirements.txt │ │ ├── setup.cfg │ │ ├── setup.py │ │ ├── tests/ │ │ │ ├── test_data/ │ │ │ │ ├── test_datasets/ │ │ │ │ │ ├── test_coco_dataset.py │ │ │ │ │ ├── test_common.py │ │ │ │ │ ├── test_custom_dataset.py │ │ │ │ │ ├── test_dataset_wrapper.py │ │ │ │ │ ├── test_openimages_dataset.py │ │ │ │ │ ├── test_panoptic_dataset.py │ │ │ │ │ └── test_xml_dataset.py │ │ │ │ ├── test_pipelines/ │ │ │ │ │ ├── test_formatting.py │ │ │ │ │ ├── test_loading.py │ │ │ │ │ ├── test_sampler.py │ │ │ │ │ └── test_transform/ 
│ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_img_augment.py │ │ │ │ │ ├── test_models_aug_test.py │ │ │ │ │ ├── test_rotate.py │ │ │ │ │ ├── test_shear.py │ │ │ │ │ ├── test_transform.py │ │ │ │ │ ├── test_translate.py │ │ │ │ │ └── utils.py │ │ │ │ └── test_utils.py │ │ │ ├── test_downstream/ │ │ │ │ └── test_mmtrack.py │ │ │ ├── test_metrics/ │ │ │ │ ├── test_box_overlap.py │ │ │ │ ├── test_losses.py │ │ │ │ ├── test_mean_ap.py │ │ │ │ └── test_recall.py │ │ │ ├── test_models/ │ │ │ │ ├── test_backbones/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_csp_darknet.py │ │ │ │ │ ├── test_detectors_resnet.py │ │ │ │ │ ├── test_efficientnet.py │ │ │ │ │ ├── test_hourglass.py │ │ │ │ │ ├── test_hrnet.py │ │ │ │ │ ├── test_mobilenet_v2.py │ │ │ │ │ ├── test_pvt.py │ │ │ │ │ ├── test_regnet.py │ │ │ │ │ ├── test_renext.py │ │ │ │ │ ├── test_res2net.py │ │ │ │ │ ├── test_resnest.py │ │ │ │ │ ├── test_resnet.py │ │ │ │ │ ├── test_swin.py │ │ │ │ │ ├── test_trident_resnet.py │ │ │ │ │ └── utils.py │ │ │ │ ├── test_dense_heads/ │ │ │ │ │ ├── test_anchor_head.py │ │ │ │ │ ├── test_atss_head.py │ │ │ │ │ ├── test_autoassign_head.py │ │ │ │ │ ├── test_centernet_head.py │ │ │ │ │ ├── test_corner_head.py │ │ │ │ │ ├── test_ddod_head.py │ │ │ │ │ ├── test_dense_heads_attr.py │ │ │ │ │ ├── test_detr_head.py │ │ │ │ │ ├── test_fcos_head.py │ │ │ │ │ ├── test_fsaf_head.py │ │ │ │ │ ├── test_ga_anchor_head.py │ │ │ │ │ ├── test_gfl_head.py │ │ │ │ │ ├── test_lad_head.py │ │ │ │ │ ├── test_ld_head.py │ │ │ │ │ ├── test_mask2former_head.py │ │ │ │ │ ├── test_maskformer_head.py │ │ │ │ │ ├── test_paa_head.py │ │ │ │ │ ├── test_pisa_head.py │ │ │ │ │ ├── test_sabl_retina_head.py │ │ │ │ │ ├── test_solo_head.py │ │ │ │ │ ├── test_tood_head.py │ │ │ │ │ ├── test_vfnet_head.py │ │ │ │ │ ├── test_yolact_head.py │ │ │ │ │ ├── test_yolof_head.py │ │ │ │ │ └── test_yolox_head.py │ │ │ │ ├── test_forward.py │ │ │ │ ├── test_loss.py │ │ │ │ ├── test_loss_compatibility.py │ │ │ │ ├── test_necks.py │ │ 
│ │ ├── test_plugins.py │ │ │ │ ├── test_roi_heads/ │ │ │ │ │ ├── __init__.py │ │ │ │ │ ├── test_bbox_head.py │ │ │ │ │ ├── test_mask_head.py │ │ │ │ │ ├── test_roi_extractor.py │ │ │ │ │ ├── test_sabl_bbox_head.py │ │ │ │ │ └── utils.py │ │ │ │ ├── test_seg_heads/ │ │ │ │ │ └── test_maskformer_fusion_head.py │ │ │ │ └── test_utils/ │ │ │ │ ├── test_brick_wrappers.py │ │ │ │ ├── test_conv_upsample.py │ │ │ │ ├── test_inverted_residual.py │ │ │ │ ├── test_model_misc.py │ │ │ │ ├── test_position_encoding.py │ │ │ │ ├── test_se_layer.py │ │ │ │ └── test_transformer.py │ │ │ ├── test_onnx/ │ │ │ │ ├── __init__.py │ │ │ │ ├── test_head.py │ │ │ │ ├── test_neck.py │ │ │ │ └── utils.py │ │ │ ├── test_runtime/ │ │ │ │ ├── async_benchmark.py │ │ │ │ ├── test_apis.py │ │ │ │ ├── test_async.py │ │ │ │ ├── test_config.py │ │ │ │ ├── test_eval_hook.py │ │ │ │ └── test_fp16.py │ │ │ └── test_utils/ │ │ │ ├── test_anchor.py │ │ │ ├── test_assigner.py │ │ │ ├── test_coder.py │ │ │ ├── test_compat_config.py │ │ │ ├── test_general_data.py │ │ │ ├── test_hook.py │ │ │ ├── test_layer_decay_optimizer_constructor.py │ │ │ ├── test_logger.py │ │ │ ├── test_masks.py │ │ │ ├── test_memory.py │ │ │ ├── test_misc.py │ │ │ ├── test_nms.py │ │ │ ├── test_replace_cfg_vals.py │ │ │ ├── test_setup_env.py │ │ │ ├── test_split_batch.py │ │ │ ├── test_version.py │ │ │ └── test_visualization.py │ │ └── tools/ │ │ ├── analysis_tools/ │ │ │ ├── analyze_logs.py │ │ │ ├── analyze_results.py │ │ │ ├── benchmark.py │ │ │ ├── coco_error_analysis.py │ │ │ ├── confusion_matrix.py │ │ │ ├── eval_metric.py │ │ │ ├── get_flops.py │ │ │ ├── optimize_anchors.py │ │ │ ├── robustness_eval.py │ │ │ └── test_robustness.py │ │ ├── dataset_converters/ │ │ │ ├── cityscapes.py │ │ │ ├── images2coco.py │ │ │ └── pascal_voc.py │ │ ├── deployment/ │ │ │ ├── mmdet2torchserve.py │ │ │ ├── mmdet_handler.py │ │ │ ├── onnx2tensorrt.py │ │ │ ├── pytorch2onnx.py │ │ │ ├── test.py │ │ │ └── test_torchserver.py │ │ ├── dist_test.sh │ 
│ ├── dist_train.sh │ │ ├── misc/ │ │ │ ├── browse_dataset.py │ │ │ ├── download_dataset.py │ │ │ ├── gen_coco_panoptic_test_info.py │ │ │ ├── get_image_metas.py │ │ │ ├── print_config.py │ │ │ └── split_coco.py │ │ ├── model_converters/ │ │ │ ├── detectron2pytorch.py │ │ │ ├── publish_model.py │ │ │ ├── regnet2mmdet.py │ │ │ ├── selfsup2mmdet.py │ │ │ ├── upgrade_model_version.py │ │ │ └── upgrade_ssd_version.py │ │ ├── slurm_test.sh │ │ ├── slurm_train.sh │ │ ├── test.py │ │ └── train.py │ ├── models_menu/ │ │ ├── mmscraper.py │ │ ├── models_json.json │ │ ├── samScraper.py │ │ └── sam_models.json │ ├── setup.py │ ├── tempCodeRunnerFile.py │ └── trackers/ │ ├── __init__.py │ ├── botsort/ │ │ ├── basetrack.py │ │ ├── bot_sort.py │ │ ├── configs/ │ │ │ └── botsort.yaml │ │ ├── gmc.py │ │ ├── kalman_filter.py │ │ ├── matching.py │ │ └── reid_multibackend.py │ ├── bytetrack/ │ │ ├── basetrack.py │ │ ├── byte_tracker.py │ │ ├── configs/ │ │ │ └── bytetrack.yaml │ │ ├── kalman_filter.py │ │ └── matching.py │ ├── deepocsort/ │ │ ├── __init__.py │ │ ├── args.py │ │ ├── association.py │ │ ├── cmc.py │ │ ├── configs/ │ │ │ └── deepocsort.yaml │ │ ├── embedding.py │ │ ├── kalmanfilter.py │ │ ├── ocsort.py │ │ └── reid_multibackend.py │ ├── multi_tracker_zoo.py │ ├── ocsort/ │ │ ├── association.py │ │ ├── configs/ │ │ │ └── ocsort.yaml │ │ ├── kalmanfilter.py │ │ └── ocsort.py │ ├── reid_export.py │ └── strongsort/ │ ├── .gitignore │ ├── __init__.py │ ├── configs/ │ │ └── strongsort.yaml │ ├── deep/ │ │ ├── checkpoint/ │ │ │ └── .gitkeep │ │ ├── models/ │ │ │ ├── __init__.py │ │ │ ├── densenet.py │ │ │ ├── hacnn.py │ │ │ ├── inceptionresnetv2.py │ │ │ ├── inceptionv4.py │ │ │ ├── mlfn.py │ │ │ ├── mobilenetv2.py │ │ │ ├── mudeep.py │ │ │ ├── nasnet.py │ │ │ ├── osnet.py │ │ │ ├── osnet_ain.py │ │ │ ├── pcb.py │ │ │ ├── resnet.py │ │ │ ├── resnet_ibn_a.py │ │ │ ├── resnet_ibn_b.py │ │ │ ├── resnetmid.py │ │ │ ├── senet.py │ │ │ ├── shufflenet.py │ │ │ ├── shufflenetv2.py │ │ │ 
├── squeezenet.py │ │ │ └── xception.py │ │ └── reid_model_factory.py │ ├── reid_multibackend.py │ ├── sort/ │ │ ├── __init__.py │ │ ├── detection.py │ │ ├── iou_matching.py │ │ ├── kalman_filter.py │ │ ├── linear_assignment.py │ │ ├── nn_matching.py │ │ ├── preprocessing.py │ │ ├── track.py │ │ └── tracker.py │ ├── strong_sort.py │ └── utils/ │ ├── __init__.py │ ├── asserts.py │ ├── draw.py │ ├── evaluation.py │ ├── io.py │ ├── json_logger.py │ ├── log.py │ ├── parser.py │ └── tools.py ├── LICENSE ├── MANIFEST.in ├── README.md ├── additional_scripts/ │ └── coco_eval.py ├── docs/ │ ├── Installation/ │ │ ├── executable.md │ │ ├── full installation.md │ │ ├── index.yml │ │ └── problems.md │ ├── index.md │ ├── main_features/ │ │ ├── Export.md │ │ ├── SAM.md │ │ ├── index.yml │ │ ├── inputs.md │ │ ├── segmentation.md │ │ └── tracking/ │ │ ├── index.yml │ │ ├── interpolation.md │ │ └── tracking.md │ ├── model_selection/ │ │ ├── index.yml │ │ ├── merge.md │ │ └── model_explorer.md │ ├── retype.yml │ └── user_interface.md ├── releasenotes.md ├── requirements.txt ├── setup.py └── yolo training commands.txt
Showing preview only (332K chars total). Download the full file or copy to clipboard to get everything.
SYMBOL INDEX (4064 symbols across 564 files)
FILE: DLTA_AI_app/__main__.py
function main (line 18) | def main():
FILE: DLTA_AI_app/inferencing.py
class models_inference (line 17) | class models_inference():
method __init__ (line 18) | def __init__(self):
method full_points (line 22) | def full_points(bbox):
method decode_file (line 26) | def decode_file(self, img, model, classdict, threshold=0.3, img_array_...
method polegonise (line 126) | def polegonise(self, results0, results1, classdict, threshold=0.3, sho...
method merge_masks (line 172) | def merge_masks(self):
function count_instances (line 278) | def count_instances(annotating_models):
FILE: DLTA_AI_app/labelme/app.py
class MainWindow (line 67) | class MainWindow(QtWidgets.QMainWindow):
method __init__ (line 73) | def __init__(
method menu (line 1116) | def menu(self, title, actions=None):
method toolbar (line 1122) | def toolbar(self, title, actions=None):
method noShapes (line 1134) | def noShapes(self):
method populateModeActions (line 1137) | def populateModeActions(self):
method setDirty (line 1149) | def setDirty(self):
method setClean (line 1170) | def setClean(self):
method toggleActions (line 1184) | def toggleActions(self, value=True):
method canvasShapeEdgeSelected (line 1191) | def canvasShapeEdgeSelected(self, selected, shape):
method queueEvent (line 1196) | def queueEvent(self, function):
method status (line 1199) | def status(self, message, delay=5000):
method resetState (line 1202) | def resetState(self):
method currentItem (line 1212) | def currentItem(self):
method addRecentFile (line 1218) | def addRecentFile(self, filename):
method Escape_clicked (line 1227) | def Escape_clicked(self):
method undoShapeEdit (line 1241) | def undoShapeEdit(self):
method toggleDrawingSensitive (line 1247) | def toggleDrawingSensitive(self, drawing=True):
method toggleDrawMode (line 1256) | def toggleDrawMode(self, edit=True, createMode="polygon"):
method setEditMode (line 1268) | def setEditMode(self):
method updateFileMenu (line 1280) | def updateFileMenu(self):
method update_models_menu (line 1297) | def update_models_menu(self):
method add_tracking_models_menu (line 1319) | def add_tracking_models_menu(self):
method add_certain_area_menu (line 1357) | def add_certain_area_menu(self):
method update_tracking_method (line 1375) | def update_tracking_method(self, method='bytetrack'):
method popLabelListMenu (line 1394) | def popLabelListMenu(self, point):
method validateLabel (line 1397) | def validateLabel(self, label):
method setCreateMode (line 1409) | def setCreateMode(self):
method editLabel (line 1414) | def editLabel(self, item=None):
method mark_as_key (line 1507) | def mark_as_key(self):
method remove_all_keyframes (line 1537) | def remove_all_keyframes(self):
method rec_frame_for_id (line 1545) | def rec_frame_for_id(self, id, frame, type_='add'):
method interpolateMENU (line 1573) | def interpolateMENU(self, item=None):
method interpolate (line 1630) | def interpolate(self, id, only_edited=False):
method interpolate_with_sam (line 1725) | def interpolate_with_sam(self, idsLISTX, only_edited=False):
method get_frame_by_idx (line 1854) | def get_frame_by_idx(self, frameIDX):
method scaleMENU (line 1859) | def scaleMENU(self):
method ctrlCopy (line 1879) | def ctrlCopy(self):
method ctrlPaste (line 1890) | def ctrlPaste(self):
method fileSearchChanged (line 1918) | def fileSearchChanged(self):
method fileSelectionChanged (line 1925) | def fileSelectionChanged(self):
method shapeSelectionChanged (line 1942) | def shapeSelectionChanged(self, selected_shapes):
method addLabel (line 1962) | def addLabel(self, shape):
method _get_rgb_by_label (line 1993) | def _get_rgb_by_label(self, label):
method remLabels (line 2010) | def remLabels(self, shapes):
method loadShapes (line 2015) | def loadShapes(self, shapes, replace=True):
method loadLabels (line 2029) | def loadLabels(self, shapes, replace=True):
method loadFlags (line 2064) | def loadFlags(self, flags):
method saveLabels (line 2072) | def saveLabels(self, filename):
method copySelectedShape (line 2129) | def copySelectedShape(self):
method labelSelectionChanged (line 2136) | def labelSelectionChanged(self):
method labelItemChanged (line 2148) | def labelItemChanged(self, item):
method labelOrderChanged (line 2152) | def labelOrderChanged(self):
method newShape (line 2158) | def newShape(self):
method scrollRequest (line 2229) | def scrollRequest(self, delta, orientation):
method setScroll (line 2235) | def setScroll(self, orientation, value):
method setZoom (line 2239) | def setZoom(self, value):
method addZoom (line 2246) | def addZoom(self, increment=1.1):
method zoomRequest (line 2254) | def zoomRequest(self, delta, pos):
method setFitWindow (line 2277) | def setFitWindow(self, value=True):
method setFitWidth (line 2283) | def setFitWidth(self, value=True):
method onNewBrightnessContrast (line 2289) | def onNewBrightnessContrast(self, qimage):
method enable_show_cross_line (line 2294) | def enable_show_cross_line(self, enabled):
method brightnessContrast (line 2299) | def brightnessContrast(self, value):
method togglePolygons (line 2318) | def togglePolygons(self, value):
method loadFile (line 2322) | def loadFile(self, filename=None):
method resizeEvent (line 2464) | def resizeEvent(self, event):
method paintCanvas (line 2473) | def paintCanvas(self):
method adjustScale (line 2479) | def adjustScale(self, initial=False):
method scaleFitWindow (line 2485) | def scaleFitWindow(self):
method scaleFitWidth (line 2497) | def scaleFitWidth(self):
method enableSaveImageWithData (line 2502) | def enableSaveImageWithData(self, enabled):
method closeEvent (line 2506) | def closeEvent(self, event):
method dragEnterEvent (line 2521) | def dragEnterEvent(self, event):
method dropEvent (line 2533) | def dropEvent(self, event):
method loadRecent (line 2542) | def loadRecent(self, filename):
method change_curr_model (line 2546) | def change_curr_model(self, model_name):
method model_explorer (line 2562) | def model_explorer(self):
method openNextImg (line 2595) | def openNextImg(self, _value=False, load=True):
method openFile (line 2621) | def openFile(self, _value=False):
method changeOutputDirDialog (line 2663) | def changeOutputDirDialog(self, _value=False):
method saveFile (line 2700) | def saveFile(self, _value=False):
method exportData (line 2716) | def exportData(self):
method saveFileAs (line 2853) | def saveFileAs(self, _value=False):
method saveFileDialog (line 2859) | def saveFileDialog(self):
method _saveFile (line 2893) | def _saveFile(self, filename):
method closeFile (line 2898) | def closeFile(self, _value=False):
method getLabelFile (line 2917) | def getLabelFile(self):
method deleteFile (line 2925) | def deleteFile(self):
method hasLabels (line 2946) | def hasLabels(self):
method hasLabelFile (line 2955) | def hasLabelFile(self):
method mayContinue (line 2962) | def mayContinue(self):
method errorMessage (line 2984) | def errorMessage(self, title, message):
method currentPath (line 2989) | def currentPath(self):
method toggleKeepPrevMode (line 2992) | def toggleKeepPrevMode(self):
method removeSelectedPoint (line 2995) | def removeSelectedPoint(self):
method deleteSelectedShape (line 3005) | def deleteSelectedShape(self):
method delete_ids_from_all_frames (line 3043) | def delete_ids_from_all_frames(self, deleted_ids, from_frame, to_frame):
method copyShape (line 3070) | def copyShape(self):
method moveShape (line 3123) | def moveShape(self):
method openDirDialog (line 3129) | def openDirDialog(self, _value=False, dirpath=None):
method imageList (line 3163) | def imageList(self):
method importDroppedImageFiles (line 3170) | def importDroppedImageFiles(self, imageFiles):
method importDirImages (line 3198) | def importDirImages(self, dirpath, pattern=None, load=True):
method scanAllImages (line 3230) | def scanAllImages(self, folderPath):
method refresh_image_MODE (line 3245) | def refresh_image_MODE(self, fromSignal=False):
method annotate_one (line 3259) | def annotate_one(self, called_from_tracking=False):
method annotate_batch (line 3308) | def annotate_batch(self):
method setConfThreshold (line 3320) | def setConfThreshold(self):
method setIOUThreshold (line 3329) | def setIOUThreshold(self):
method selectClasses (line 3338) | def selectClasses(self):
method mergeSegModels (line 3342) | def mergeSegModels(self):
method Segment_anything (line 3351) | def Segment_anything(self):
method calculate_trajectories (line 3360) | def calculate_trajectories(self, frames=None):
method right_click_menu (line 3414) | def right_click_menu(self):
method reset_for_new_mode (line 3465) | def reset_for_new_mode(self, mode):
method openVideo (line 3487) | def openVideo(self):
method openVideoFrames (line 3551) | def openVideoFrames(self):
method load_shapes_for_video_frame (line 3571) | def load_shapes_for_video_frame(self, json_file_name, index):
method loadFramefromVideo (line 3600) | def loadFramefromVideo(self, frame_array, index=1):
method nextFrame_buttonClicked (line 3660) | def nextFrame_buttonClicked(self):
method next_1_Frame_buttonClicked (line 3668) | def next_1_Frame_buttonClicked(self):
method previousFrame_buttonClicked (line 3676) | def previousFrame_buttonClicked(self):
method previous_1_Frame_buttonclicked (line 3683) | def previous_1_Frame_buttonclicked(self):
method frames_to_skip_slider_changed (line 3690) | def frames_to_skip_slider_changed(self):
method playPauseButtonClicked (line 3696) | def playPauseButtonClicked(self):
method move_frame_by_frame (line 3727) | def move_frame_by_frame(self):
method main_video_frames_slider_changed (line 3731) | def main_video_frames_slider_changed(self):
method frames_to_track_input_changed (line 3775) | def frames_to_track_input_changed(self, text):
method frames_to_track_slider_changed (line 3788) | def frames_to_track_slider_changed(self, value):
method track_assigned_objects_button_clicked (line 3792) | def track_assigned_objects_button_clicked(self):
method update_gui_after_tracking (line 3805) | def update_gui_after_tracking(self, index):
method certain_area_clicked (line 3811) | def certain_area_clicked(self, index):
method track_dropdown_changed (line 3824) | def track_dropdown_changed(self, index):
method start_tracking_button_clicked (line 3827) | def start_tracking_button_clicked(self):
method track_buttonClicked (line 3841) | def track_buttonClicked(self):
method track_full_video_button_clicked (line 4005) | def track_full_video_button_clicked(self):
method set_video_controls_visibility (line 4010) | def set_video_controls_visibility(self, visible=False):
method traj_checkBox_changed (line 4025) | def traj_checkBox_changed(self):
method mask_checkBox_changed (line 4034) | def mask_checkBox_changed(self):
method class_checkBox_changed (line 4044) | def class_checkBox_changed(self):
method conf_checkBox_changed (line 4054) | def conf_checkBox_changed(self):
method id_checkBox_changed (line 4064) | def id_checkBox_changed(self):
method bbox_checkBox_changed (line 4072) | def bbox_checkBox_changed(self):
method polygons_visable_checkBox_changed (line 4082) | def polygons_visable_checkBox_changed(self):
method export_as_video_button_clicked (line 4093) | def export_as_video_button_clicked(self, output_filename=None):
method clear_video_annotations_button_clicked (line 4162) | def clear_video_annotations_button_clicked(self):
method update_current_frame_annotation_button_clicked (line 4187) | def update_current_frame_annotation_button_clicked(self):
method update_current_frame_annotation (line 4199) | def update_current_frame_annotation(self):
method trajectory_length_lineEdit_changed (line 4239) | def trajectory_length_lineEdit_changed(self):
method addVideoControls (line 4248) | def addVideoControls(self):
method draw_bb_on_image (line 4542) | def draw_bb_on_image(self, image, shapes, image_qt_flag=True):
method waitWindow (line 4549) | def waitWindow(self, visible=False, text=None):
method set_sam_toolbar_enable (line 4560) | def set_sam_toolbar_enable(self, enable=False):
method set_sam_toolbar_visibility (line 4568) | def set_sam_toolbar_visibility(self, visible=False):
method addSamControls (line 4582) | def addSamControls(self):
method updateSamControls (line 4750) | def updateSamControls(self):
method sam_reset_button_clicked (line 4757) | def sam_reset_button_clicked(self):
method sam_enhance_annotation_button_clicked (line 4761) | def sam_enhance_annotation_button_clicked(self):
method sam_models (line 4798) | def sam_models(self):
method sam_model_comboBox_changed (line 4810) | def sam_model_comboBox_changed(self):
method sam_buttons_colors (line 4848) | def sam_buttons_colors(self, mode):
method set_sam_toolbar_enable (line 4859) | def set_sam_toolbar_enable(self, setEnabled):
method set_sam_toolbar_colors (line 4866) | def set_sam_toolbar_colors(self, mode):
method sam_add_point_button_clicked (line 4897) | def sam_add_point_button_clicked(self):
method sam_remove_point_button_clicked (line 4913) | def sam_remove_point_button_clicked(self):
method sam_select_rect_button_clicked (line 4928) | def sam_select_rect_button_clicked(self):
method sam_clear_annotation_button_clicked (line 4944) | def sam_clear_annotation_button_clicked(self):
method sam_finish_annotation_button_clicked (line 4964) | def sam_finish_annotation_button_clicked(self):
method check_sam_instance_in_shapes (line 5019) | def check_sam_instance_in_shapes(self, shapes):
method run_sam_model (line 5028) | def run_sam_model(self):
method turnOFF_SAM (line 5065) | def turnOFF_SAM(self):
method turnON_SAM (line 5076) | def turnON_SAM(self):
method sam_enhanced_bbox_segment (line 5087) | def sam_enhanced_bbox_segment(self, frameIMAGE, cur_bbox, thresh, max_...
method load_objects_from_json__json (line 5119) | def load_objects_from_json__json(self):
method load_objects_to_json__json (line 5125) | def load_objects_to_json__json(self, listObj):
method load_objects_from_json__orjson (line 5130) | def load_objects_from_json__orjson(self):
method load_objects_to_json__orjson (line 5136) | def load_objects_to_json__orjson(self, listObj):
FILE: DLTA_AI_app/labelme/cli/draw_json.py
function main (line 16) | def main():
FILE: DLTA_AI_app/labelme/cli/draw_label_png.py
function main (line 11) | def main():
FILE: DLTA_AI_app/labelme/cli/json_to_dataset.py
function main (line 14) | def main():
FILE: DLTA_AI_app/labelme/cli/on_docker.py
function get_ip (line 16) | def get_ip():
function labelme_on_docker (line 36) | def labelme_on_docker(in_file, out_file):
function main (line 82) | def main():
FILE: DLTA_AI_app/labelme/config/__init__.py
function update_dict (line 12) | def update_dict(target_dict, new_dict, validate_item=None):
function get_default_config (line 28) | def get_default_config():
function validate_config_item (line 44) | def validate_config_item(key, value):
function get_config (line 61) | def get_config(config_file_or_yaml=None, config_from_args=None):
FILE: DLTA_AI_app/labelme/intelligence.py
class IntelligenceWorker (line 44) | class IntelligenceWorker(QThread):
method __init__ (line 47) | def __init__(self, parent, images, source,multi_model_flag=False):
method run (line 55) | def run(self):
class Intelligence (line 85) | class Intelligence():
method __init__ (line 86) | def __init__(self, parent):
method make_mm_model (line 107) | def make_mm_model(self, selected_model_name):
method make_mm_model_more (line 149) | def make_mm_model_more(self, selected_model_name, config, checkpoint):
method get_shapes_of_one (line 176) | def get_shapes_of_one(self, image, img_array_flag=False, multi_model_f...
method get_shapes_of_batch (line 261) | def get_shapes_of_batch(self, images, multi_model_flag=False, notif = ...
method updateDialog (line 268) | def updateDialog(self, completed, total):
method startOperationDialog (line 276) | def startOperationDialog(self):
method onProgressDialogCanceledOrCompleted (line 289) | def onProgressDialogCanceledOrCompleted(self):
method clear_annotating_models (line 302) | def clear_annotating_models(self):
method saveLabelFile (line 305) | def saveLabelFile(self, filename, detectedShapes):
FILE: DLTA_AI_app/labelme/label_file.py
function open (line 20) | def open(name, mode):
class LabelFileError (line 31) | class LabelFileError(Exception):
class LabelFile (line 35) | class LabelFile(object):
method __init__ (line 39) | def __init__(self, filename=None):
method load_image_file (line 48) | def load_image_file(filename):
method load (line 70) | def load(self, filename):
method _check_image_height_and_width (line 155) | def _check_image_height_and_width(imageData, imageHeight, imageWidth):
method save (line 171) | def save(
method is_label_file (line 211) | def is_label_file(filename):
FILE: DLTA_AI_app/labelme/logger.py
class ColoredFormatter (line 24) | class ColoredFormatter(logging.Formatter):
method __init__ (line 25) | def __init__(self, fmt, use_color=True):
method format (line 29) | def format(self, record):
class ColoredLogger (line 52) | class ColoredLogger(logging.Logger):
method __init__ (line 58) | def __init__(self, name):
FILE: DLTA_AI_app/labelme/shape.py
class Shape (line 22) | class Shape(object):
method __init__ (line 47) | def __init__(
method shape_type (line 85) | def shape_type(self):
method shape_type (line 89) | def shape_type(self, value):
method close (line 103) | def close(self):
method addPoint (line 106) | def addPoint(self, point):
method canAddPoint (line 112) | def canAddPoint(self):
method popPoint (line 115) | def popPoint(self):
method insertPoint (line 120) | def insertPoint(self, i, point):
method removePoint (line 123) | def removePoint(self, i):
method isClosed (line 126) | def isClosed(self):
method setOpen (line 129) | def setOpen(self):
method getRectFromLine (line 132) | def getRectFromLine(self, pt1, pt2):
method paint (line 137) | def paint(self, painter):
method drawVertex (line 193) | def drawVertex(self, path, i):
method nearestVertex (line 211) | def nearestVertex(self, point, epsilon):
method nearestEdge (line 221) | def nearestEdge(self, point, epsilon):
method containsPoint (line 232) | def containsPoint(self, point):
method getCircleRectFromLine (line 235) | def getCircleRectFromLine(self, line):
method makePath (line 245) | def makePath(self):
method boundingRect (line 262) | def boundingRect(self):
method moveBy (line 265) | def moveBy(self, offset):
method moveVertexBy (line 268) | def moveVertexBy(self, i, offset):
method highlightVertex (line 271) | def highlightVertex(self, i, action):
method highlightClear (line 282) | def highlightClear(self):
method copy (line 286) | def copy(self):
method __len__ (line 289) | def __len__(self):
method __getitem__ (line 292) | def __getitem__(self, key):
method __setitem__ (line 295) | def __setitem__(self, key, value):
FILE: DLTA_AI_app/labelme/testing.py
function assert_labelfile_sanity (line 8) | def assert_labelfile_sanity(filename):
FILE: DLTA_AI_app/labelme/utils/_io.py
function lblsave (line 7) | def lblsave(filename, lbl):
FILE: DLTA_AI_app/labelme/utils/custom_exports.py
class CustomExport (line 6) | class CustomExport:
method __init__ (line 20) | def __init__(self, file_name, button_name, format, function, mode = "v...
method __call__ (line 39) | def __call__(self, *args):
function foo (line 88) | def foo():
function bar (line 91) | def bar(results_file, vid_width, vid_height, annotation_path):
function baz (line 98) | def baz(json_paths, annotation_path):
function count_objects (line 106) | def count_objects(json_paths, annotation_path):
FILE: DLTA_AI_app/labelme/utils/export.py
function center_of_polygon (line 11) | def center_of_polygon(polygon):
function get_bbox (line 46) | def get_bbox(segmentation):
function get_area_from_polygon (line 82) | def get_area_from_polygon(polygon, mode="segmentation"):
function parse_img_export (line 114) | def parse_img_export(target_directory, save_path):
function exportCOCO (line 138) | def exportCOCO(json_paths, annotation_path):
function exportCOCOvid (line 249) | def exportCOCOvid(results_file, vid_width, vid_height, annotation_path):
function exportMOT (line 353) | def exportMOT(results_file, annotation_path):
class FolderDialog (line 378) | class FolderDialog(QFileDialog):
method __init__ (line 388) | def __init__(self, default_file_name, default_format):
FILE: DLTA_AI_app/labelme/utils/helpers/mathOps.py
class ColorGen (line 30) | class ColorGen:
method __init__ (line 41) | def __init__(self):
method generateColors (line 44) | def generateColors(self, num, lightness, saturation):
function get_bbox_xyxy (line 70) | def get_bbox_xyxy(segment):
function addPoints (line 90) | def addPoints(shape, n):
function reducePoints (line 137) | def reducePoints(polygon, n):
function handlePoints (line 202) | def handlePoints(polygon, n):
function handleTwoSegments (line 228) | def handleTwoSegments(segment1, segment2):
function allign (line 250) | def allign(shape1, shape2):
function centerOFmass (line 282) | def centerOFmass(points):
function flattener (line 299) | def flattener(list_2d):
function mapFrameToTime (line 316) | def mapFrameToTime(frameNumber, fps):
function class_name_to_id (line 344) | def class_name_to_id(class_name):
function compute_iou (line 364) | def compute_iou(box1, box2):
function compute_iou_exact (line 400) | def compute_iou_exact(shape1, shape2):
function match_detections_with_tracks (line 425) | def match_detections_with_tracks(detections, tracks, iou_threshold=0.5):
function get_boxes_conf_classids_segments (line 471) | def get_boxes_conf_classids_segments(shapes):
function convert_qt_shapes_to_shapes (line 511) | def convert_qt_shapes_to_shapes(qt_shapes):
function convert_shapes_to_qt_shapes (line 538) | def convert_shapes_to_qt_shapes(shapes):
function convert_QT_to_cv (line 566) | def convert_QT_to_cv(incomingImage):
function convert_cv_to_qt (line 588) | def convert_cv_to_qt(cv_img):
function SAM_rects_to_boxes (line 608) | def SAM_rects_to_boxes(rects):
function SAM_points_and_labels_from_coordinates (line 633) | def SAM_points_and_labels_from_coordinates(coordinates):
function load_objects_from_json__json (line 662) | def load_objects_from_json__json(json_file_name, nTotalFrames):
function load_objects_to_json__json (line 689) | def load_objects_to_json__json(json_file_name, listObj):
function load_objects_from_json__orjson (line 709) | def load_objects_from_json__orjson(json_file_name, nTotalFrames):
function load_objects_to_json__orjson (line 734) | def load_objects_to_json__orjson(json_file_name, listObj):
function scaleQTshape (line 752) | def scaleQTshape(self, originalshape, center, ratioX, ratioY):
function is_id_repeated (line 786) | def is_id_repeated(self, group_id, frameIdex=-1):
function checkKeyFrames (line 812) | def checkKeyFrames(ids, keyFrames):
function getInterpolated (line 842) | def getInterpolated(baseObject, baseObjectFrame, nextObject, nextObjectF...
function update_saved_models_json (line 877) | def update_saved_models_json(cwd):
function delete_id_from_rec_and_traj (line 904) | def delete_id_from_rec_and_traj(id, id_frames_rec, trajectories, frames):
function adjust_shapes_to_original_image (line 926) | def adjust_shapes_to_original_image(shapes, x1, y1, area_points):
function track_area_adjustedBboex (line 945) | def track_area_adjustedBboex(area_points, dims, ratio = 0.1):
function get_contour_length (line 956) | def get_contour_length(contour):
function mask_to_polygons (line 961) | def mask_to_polygons(mask, n_points=25, resize_factors=[1.0, 1.0]):
function polygon_to_shape (line 981) | def polygon_to_shape(polygon, score, className="SAM instance"):
function OURnms_confidenceBased (line 998) | def OURnms_confidenceBased(shapes, iou_threshold=0.5):
function OURnms_areaBased_fromSAM (line 1051) | def OURnms_areaBased_fromSAM(self, sam_result, iou_threshold=0.5):
FILE: DLTA_AI_app/labelme/utils/helpers/visualizations.py
function draw_bb_id (line 21) | def draw_bb_id(flags, image, x, y, w, h, id, conf, label, color=(0, 0, 2...
function draw_trajectories (line 104) | def draw_trajectories(trajectories, CurrentFrameIndex, flags, img, shapes):
function draw_bb_on_image (line 160) | def draw_bb_on_image(trajectories, CurrentFrameIndex, flags, nTotalFrame...
function draw_bb_on_image_MODE (line 237) | def draw_bb_on_image_MODE(flags, image, shapes):
function draw_bb_label_on_image_MODE (line 290) | def draw_bb_label_on_image_MODE(flags, image, x, y, w, h, label, conf, c...
FILE: DLTA_AI_app/labelme/utils/image.py
function img_data_to_pil (line 10) | def img_data_to_pil(img_data):
function img_data_to_arr (line 17) | def img_data_to_arr(img_data):
function img_b64_to_arr (line 23) | def img_b64_to_arr(img_b64):
function img_pil_to_data (line 29) | def img_pil_to_data(img_pil):
function img_arr_to_b64 (line 36) | def img_arr_to_b64(img_arr):
function img_data_to_png_data (line 48) | def img_data_to_png_data(img_data):
function apply_exif_orientation (line 59) | def apply_exif_orientation(image):
FILE: DLTA_AI_app/labelme/utils/model_explorer.py
class ModelExplorerDialog (line 15) | class ModelExplorerDialog(QDialog):
method __init__ (line 25) | def __init__(self, main_window=None, mute=None, notification=None):
method populate_table (line 122) | def populate_table(self):
method search (line 195) | def search(self):
method select_model (line 232) | def select_model(self):
method download_model (line 250) | def download_model(self, id):
method cancel_download (line 345) | def cancel_download(self):
method create_download_callback (line 355) | def create_download_callback(self, model_id):
method check_availability (line 368) | def check_availability(self):
method open_checkpoints_dir (line 383) | def open_checkpoints_dir(self):
FILE: DLTA_AI_app/labelme/utils/qt.py
function newIcon (line 14) | def newIcon(icon):
function newButton (line 19) | def newButton(text, icon=None, slot=None):
function newAction (line 28) | def newAction(
function addActions (line 61) | def addActions(widget, actions):
function labelValidator (line 71) | def labelValidator():
class struct (line 75) | class struct(object):
method __init__ (line 76) | def __init__(self, **kwargs):
function distance (line 80) | def distance(p):
function distancetoline (line 84) | def distancetoline(point, line):
function fmtShortcut (line 98) | def fmtShortcut(text):
FILE: DLTA_AI_app/labelme/utils/sam.py
class Sam_Predictor (line 10) | class Sam_Predictor():
method __init__ (line 11) | def __init__(self, model_type, checkpoint_path, device):
method set_new_image (line 22) | def set_new_image(self, image):
method clear_logit (line 26) | def clear_logit(self):
method predict (line 30) | def predict(self, point_coords=None, point_labels=None, box=None, mult...
method predict_batch (line 77) | def predict_batch(self, boxes=None, image=None):
method check_image (line 90) | def check_image(self , new_image):
method get_all_shapes (line 100) | def get_all_shapes(self, image, iou_threshold):
FILE: DLTA_AI_app/labelme/utils/shape.py
function polygons_to_mask (line 11) | def polygons_to_mask(img_shape, polygons, shape_type=None):
function shape_to_mask (line 19) | def shape_to_mask(
function shapes_to_label (line 51) | def shapes_to_label(img_shape, shapes, label_name_to_value):
function labelme_shapes_to_label (line 78) | def labelme_shapes_to_label(img_shape, shapes):
function masks_to_bboxes (line 97) | def masks_to_bboxes(masks):
FILE: DLTA_AI_app/labelme/utils/vid_to_frames.py
class VideoFrameExtractor (line 11) | class VideoFrameExtractor(QDialog):
method __init__ (line 12) | def __init__(self, mute = None, notification = None):
method select_file (line 166) | def select_file(self):
method update_sampling_rate (line 214) | def update_sampling_rate(self, value):
method update_sampling_slider (line 219) | def update_sampling_slider(self, text):
method update_start_frame (line 236) | def update_start_frame(self, value):
method update_start_slider (line 241) | def update_start_slider(self, text):
method update_end_frame (line 259) | def update_end_frame(self, value):
method update_end_slider (line 264) | def update_end_slider(self, text):
method extract_frames (line 279) | def extract_frames(self):
method stop_extraction (line 290) | def stop_extraction(self):
method get_time_string (line 294) | def get_time_string(self, seconds, separator=":"):
method vid_to_frames (line 301) | def vid_to_frames(self, vid_path, sampling_rate, start_frame, end_frame):
FILE: DLTA_AI_app/labelme/widgets/ClassesWidget.py
class Classeswidget (line 7) | class Classeswidget(QtWidgets.QDialog):
method __init__ (line 8) | def __init__(self):
method _createQComboBox (line 15) | def _createQComboBox(self):
method onNewValue (line 21) | def onNewValue(self, value):
method getValue (line 24) | def getValue(self):
method setValue (line 27) | def setValue(self, value):
method exec (line 30) | def exec(self):
FILE: DLTA_AI_app/labelme/widgets/MsgBox.py
function OKmsgBox (line 4) | def OKmsgBox(title, text, type = "info", turnResult = False):
FILE: DLTA_AI_app/labelme/widgets/ThresholdWidget.py
class ThresholdWidget (line 6) | class ThresholdWidget(QtWidgets.QDialog):
method __init__ (line 7) | def __init__(self):
method _createQLineEdit (line 14) | def _createQLineEdit(self):
FILE: DLTA_AI_app/labelme/widgets/brightness_contrast_dialog.py
class BrightnessContrastDialog (line 10) | class BrightnessContrastDialog(QtWidgets.QDialog):
method __init__ (line 11) | def __init__(self, img, callback, parent=None):
method onNewValue (line 28) | def onNewValue(self, value):
method _create_slider (line 40) | def _create_slider(self):
FILE: DLTA_AI_app/labelme/widgets/canvas.py
class Canvas (line 22) | class Canvas(QtWidgets.QWidget):
method __init__ (line 48) | def __init__(self, *args, **kwargs):
method fillDrawing (line 120) | def fillDrawing(self):
method setFillDrawing (line 123) | def setFillDrawing(self, value):
method createMode (line 127) | def createMode(self):
method createMode (line 131) | def createMode(self, value):
method storeShapes (line 138) | def storeShapes(self):
method isShapeRestorable (line 147) | def isShapeRestorable(self):
method restoreShape (line 155) | def restoreShape(self):
method enterEvent (line 172) | def enterEvent(self, ev):
method leaveEvent (line 175) | def leaveEvent(self, ev):
method focusOutEvent (line 179) | def focusOutEvent(self, ev):
method isVisible (line 182) | def isVisible(self, shape):
method drawing (line 185) | def drawing(self):
method editing (line 188) | def editing(self):
method setEditing (line 191) | def setEditing(self, value=True):
method unHighlight (line 197) | def unHighlight(self):
method selectedVertex (line 206) | def selectedVertex(self):
method set_show_cross_line (line 209) | def set_show_cross_line(self, enabled):
method mouseMoveEvent (line 214) | def mouseMoveEvent(self, ev):
method addPointToEdge (line 329) | def addPointToEdge(self):
method removeSelectedPoint (line 342) | def removeSelectedPoint(self):
method corrected_pos_into_pixmap (line 354) | def corrected_pos_into_pixmap(self, pos):
method mousePressEvent (line 362) | def mousePressEvent(self, ev):
method handle_right_click (line 418) | def handle_right_click(self, menu):
method mouseReleaseEvent (line 429) | def mouseReleaseEvent(self, ev):
method endMove (line 478) | def endMove(self, copy):
method hideBackroundShapes (line 494) | def hideBackroundShapes(self, value):
method setHiding (line 502) | def setHiding(self, enable=True):
method canCloseShape (line 505) | def canCloseShape(self):
method mouseDoubleClickEvent (line 508) | def mouseDoubleClickEvent(self, ev):
method selectShapes (line 523) | def selectShapes(self, shapes):
method selectShapePoint (line 528) | def selectShapePoint(self, point, multiple_selection_mode):
method calculateOffsets (line 548) | def calculateOffsets(self, shape, point):
method boundedMoveVertex (line 556) | def boundedMoveVertex(self, pos):
method boundedMoveShapes (line 565) | def boundedMoveShapes(self, shapes, pos):
method deSelectShape (line 590) | def deSelectShape(self):
method deleteSelected (line 596) | def deleteSelected(self):
method deleteShape (line 607) | def deleteShape(self, shape):
method copySelectedShapes (line 615) | def copySelectedShapes(self):
method boundedShiftShapes (line 622) | def boundedShiftShapes(self, shapes):
method paintEvent (line 632) | def paintEvent(self, event):
method transformPos (line 806) | def transformPos(self, point):
method offsetToCenter (line 811) | def offsetToCenter(self):
method outOfPixmap (line 820) | def outOfPixmap(self, p):
method finalise (line 824) | def finalise(self, SAM_SHAPE=False):
method closeEnough (line 843) | def closeEnough(self, p1, p2):
method intersectionPoint (line 850) | def intersectionPoint(self, p1, p2):
method intersectingEdges (line 876) | def intersectingEdges(self, point1, point2, points):
method sizeHint (line 907) | def sizeHint(self):
method minimumSizeHint (line 910) | def minimumSizeHint(self):
method wheelEvent (line 915) | def wheelEvent(self, ev):
method keyPressEvent (line 928) | def keyPressEvent(self, ev):
method cancelManualDrawing (line 939) | def cancelManualDrawing(self):
method setLastLabel (line 944) | def setLastLabel(self, text, flags):
method undoLastLine (line 952) | def undoLastLine(self):
method undoLastPoint (line 960) | def undoLastPoint(self):
method loadPixmap (line 971) | def loadPixmap(self, pixmap, clear_shapes=True):
method loadShapes (line 977) | def loadShapes(self, shapes, replace=True):
method setShapeVisible (line 989) | def setShapeVisible(self, shape, value):
method overrideCursor (line 993) | def overrideCursor(self, cursor):
method restoreCursor (line 998) | def restoreCursor(self):
method resetState (line 1001) | def resetState(self):
FILE: DLTA_AI_app/labelme/widgets/check_updates_UI.py
function PopUp (line 11) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/color_dialog.py
class ColorDialog (line 4) | class ColorDialog(QtWidgets.QColorDialog):
method __init__ (line 5) | def __init__(self, parent=None):
method getColor (line 18) | def getColor(self, value=None, title=None, default=None):
method checkRestore (line 26) | def checkRestore(self, button):
FILE: DLTA_AI_app/labelme/widgets/deleteSelectedShape_UI.py
function PopUp (line 6) | def PopUp(TOTAL_VIDEO_FRAMES, INDEX_OF_CURRENT_FRAME, config):
FILE: DLTA_AI_app/labelme/widgets/editLabel_videoMode.py
function editLabel_idChanged_UI (line 8) | def editLabel_idChanged_UI(config, old_group_id, new_group_id, id_frames...
function check_duplicates_editLabel (line 55) | def check_duplicates_editLabel(id_frames_rec, old_group_id, new_group_id...
function editLabel_handle_data (line 110) | def editLabel_handle_data(currFrame, listObj,
function update_id_in_listObjframe (line 165) | def update_id_in_listObjframe(listObj, frame, shape, old_id, new_id = No...
function update_id_in_listObjframes (line 196) | def update_id_in_listObjframes(listObj, frames, shape, old_id, new_id = ...
function transfer_rec_and_traj (line 219) | def transfer_rec_and_traj(id, id_frames_rec, trajectories, frames, new_id):
function reducing_Intersection (line 266) | def reducing_Intersection(Intersection):
FILE: DLTA_AI_app/labelme/widgets/escapable_qlist_widget.py
class EscapableQListWidget (line 5) | class EscapableQListWidget(QtWidgets.QListWidget):
method keyPressEvent (line 6) | def keyPressEvent(self, event):
FILE: DLTA_AI_app/labelme/widgets/exportData_UI.py
function PopUp (line 15) | def PopUp(mode = "video"):
FILE: DLTA_AI_app/labelme/widgets/feedback_UI.py
function PopUp (line 6) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/getIDfromUser_UI.py
function PopUp (line 7) | def PopUp(self, group_id, text):
FILE: DLTA_AI_app/labelme/widgets/interpolation_UI.py
function PopUp (line 8) | def PopUp(config):
FILE: DLTA_AI_app/labelme/widgets/label_dialog.py
class LabelQLineEdit (line 19) | class LabelQLineEdit(QtWidgets.QLineEdit):
method setListWidget (line 20) | def setListWidget(self, list_widget):
method keyPressEvent (line 23) | def keyPressEvent(self, e):
class LabelDialog (line 30) | class LabelDialog(QtWidgets.QDialog):
method __init__ (line 31) | def __init__(
method addLabelHistory (line 178) | def addLabelHistory(self, label):
method labelSelected (line 185) | def labelSelected(self, item):
method validate (line 188) | def validate(self):
method labelDoubleClicked (line 197) | def labelDoubleClicked(self, item):
method postProcess (line 200) | def postProcess(self):
method updateFlags (line 208) | def updateFlags(self, label_new):
method deleteFlags (line 219) | def deleteFlags(self):
method resetFlags (line 225) | def resetFlags(self, label=""):
method setFlags (line 233) | def setFlags(self, flags):
method getFlags (line 241) | def getFlags(self):
method getGroupId (line 249) | def getGroupId(self):
method getContent (line 255) | def getContent(self):
method setContent (line 261) | def setContent(self, content):
method popUp (line 266) | def popUp(self, text=None, move=True, flags=None, group_id=None, conte...
FILE: DLTA_AI_app/labelme/widgets/label_list_widget.py
class HTMLDelegate (line 10) | class HTMLDelegate(QtWidgets.QStyledItemDelegate):
method __init__ (line 11) | def __init__(self, parent=None):
method paint (line 15) | def paint(self, painter, option, index):
method sizeHint (line 62) | def sizeHint(self, option, index):
class LabelListWidgetItem (line 70) | class LabelListWidgetItem(QtGui.QStandardItem):
method __init__ (line 71) | def __init__(self, text=None, shape=None):
method clone (line 84) | def clone(self):
method setShape (line 87) | def setShape(self, shape):
method shape (line 90) | def shape(self):
method __hash__ (line 93) | def __hash__(self):
method __repr__ (line 96) | def __repr__(self):
class StandardItemModel (line 100) | class StandardItemModel(QtGui.QStandardItemModel):
method removeRows (line 104) | def removeRows(self, *args, **kwargs):
class LabelListWidget (line 110) | class LabelListWidget(QtWidgets.QListView):
method __init__ (line 115) | def __init__(self):
method __len__ (line 132) | def __len__(self):
method __getitem__ (line 135) | def __getitem__(self, i):
method __iter__ (line 138) | def __iter__(self):
method itemDropped (line 143) | def itemDropped(self):
method itemChanged (line 147) | def itemChanged(self):
method itemSelectionChangedEvent (line 150) | def itemSelectionChangedEvent(self, selected, deselected):
method itemDoubleClickedEvent (line 157) | def itemDoubleClickedEvent(self, index):
method selectedItems (line 160) | def selectedItems(self):
method scrollToItem (line 163) | def scrollToItem(self, item):
method addItem (line 166) | def addItem(self, item):
method removeItem (line 172) | def removeItem(self, item):
method selectItem (line 176) | def selectItem(self, item):
method findItemByShape (line 180) | def findItemByShape(self, shape):
method clear (line 187) | def clear(self):
FILE: DLTA_AI_app/labelme/widgets/links.py
function open_git_hub (line 3) | def open_git_hub():
function open_issue (line 17) | def open_issue():
function open_license (line 31) | def open_license():
function open_guide (line 45) | def open_guide():
function open_release (line 59) | def open_release(link = None):
FILE: DLTA_AI_app/labelme/widgets/merge_feature_UI.py
class MergeFeatureUI (line 7) | class MergeFeatureUI():
method __init__ (line 8) | def __init__(self, parent):
method mergeSegModels (line 13) | def mergeSegModels(self):
FILE: DLTA_AI_app/labelme/widgets/notification.py
function PopUp (line 3) | def PopUp(text):
FILE: DLTA_AI_app/labelme/widgets/open_file.py
function PopUp (line 8) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/preferences_UI.py
function PopUp (line 8) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/runtime_data_UI.py
function PopUp (line 9) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/scaleObject_UI.py
function PopUp (line 9) | def PopUp(self):
FILE: DLTA_AI_app/labelme/widgets/segmentation_options_UI.py
class SegmentationOptionsUI (line 22) | class SegmentationOptionsUI():
method __init__ (line 23) | def __init__(self, parent):
method setConfThreshold (line 43) | def setConfThreshold(self, prev_threshold=0.3):
method setIOUThreshold (line 97) | def setIOUThreshold(self, prev_threshold=0.5):
method selectClasses (line 152) | def selectClasses(self):
method saveClasses (line 251) | def saveClasses(self, dialog, is_default=False):
method selectAll (line 282) | def selectAll(self):
method deselectAll (line 292) | def deselectAll(self):
FILE: DLTA_AI_app/labelme/widgets/shortcut_selector_UI.py
function PopUp (line 5) | def PopUp():
FILE: DLTA_AI_app/labelme/widgets/tool_bar.py
class ToolBar (line 5) | class ToolBar(QtWidgets.QToolBar):
method __init__ (line 6) | def __init__(self, title):
method addAction (line 15) | def addAction(self, action):
FILE: DLTA_AI_app/labelme/widgets/unique_label_qlist_widget.py
class UniqueLabelQListWidget (line 9) | class UniqueLabelQListWidget(EscapableQListWidget):
method mousePressEvent (line 10) | def mousePressEvent(self, event):
method findItemsByLabel (line 15) | def findItemsByLabel(self, label):
method createItemFromLabel (line 23) | def createItemFromLabel(self, label):
method setItemLabel (line 28) | def setItemLabel(self, item, label, color=None):
FILE: DLTA_AI_app/labelme/widgets/zoom_widget.py
class ZoomWidget (line 6) | class ZoomWidget(QtWidgets.QSpinBox):
method __init__ (line 7) | def __init__(self, value=100):
method minimumSizeHint (line 17) | def minimumSizeHint(self):
FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_filter.py
function parse_args (line 7) | def parse_args():
function main (line 132) | def main():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_inference_fps.py
function parse_args (line 14) | def parse_args():
function results2markdown (line 62) | def results2markdown(result_dict):
FILE: DLTA_AI_app/mmdetection/.dev_scripts/benchmark_test_image.py
function parse_args (line 12) | def parse_args():
function inference_model (line 33) | def inference_model(config_name, checkpoint, args, logger=None):
function main (line 60) | def main(args):
FILE: DLTA_AI_app/mmdetection/.dev_scripts/check_links.py
function parse_args (line 17) | def parse_args():
class MatchTuple (line 47) | class MatchTuple(NamedTuple):
function check_link (line 53) | def check_link(
function check_url (line 69) | def check_url(match_tuple: MatchTuple,
function check_path (line 83) | def check_path(match_tuple: MatchTuple) -> bool:
function main (line 91) | def main():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/convert_test_benchmark_script.py
function parse_args (line 9) | def parse_args():
function process_model_info (line 27) | def process_model_info(model_info, work_dir):
function create_test_bash_info (line 46) | def create_test_bash_info(commands, model_test_dict, port, script_name,
function main (line 74) | def main():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/convert_train_benchmark_script.py
function parse_args (line 7) | def parse_args():
function main (line 31) | def main():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_models.py
function ordered_yaml_dump (line 15) | def ordered_yaml_dump(data, stream=None, Dumper=yaml.SafeDumper, **kwds):
function process_checkpoint (line 28) | def process_checkpoint(in_file, out_file):
function is_by_epoch (line 51) | def is_by_epoch(config):
function get_final_epoch_or_iter (line 56) | def get_final_epoch_or_iter(config):
function get_best_epoch_or_iter (line 64) | def get_best_epoch_or_iter(exp_dir):
function get_real_epoch_or_iter (line 73) | def get_real_epoch_or_iter(config):
function get_final_results (line 84) | def get_final_results(log_json_path,
function get_dataset_name (line 132) | def get_dataset_name(config):
function convert_model_info_to_pwc (line 149) | def convert_model_info_to_pwc(model_infos):
function parse_args (line 208) | def parse_args():
function main (line 225) | def main():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_test_benchmark_metric.py
function parse_args (line 10) | def parse_args():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/gather_train_benchmark_metric.py
function parse_args (line 20) | def parse_args():
FILE: DLTA_AI_app/mmdetection/.dev_scripts/test_init_backbone.py
function _get_config_directory (line 14) | def _get_config_directory():
function _get_config_module (line 29) | def _get_config_module(fname):
function _get_detector_cfg (line 38) | def _get_detector_cfg(fname):
function _traversed_config_file (line 49) | def _traversed_config_file():
function _check_backbone (line 90) | def _check_backbone(config, print_cfg=True):
function test_load_pretrained (line 152) | def test_load_pretrained(config):
function _test_load_pretrained (line 161) | def _test_load_pretrained():
FILE: DLTA_AI_app/mmdetection/docs/en/conf.py
function get_version (line 29) | def get_version():
function builder_inited_handler (line 111) | def builder_inited_handler(app):
function setup (line 115) | def setup(app):
FILE: DLTA_AI_app/mmdetection/docs/zh_cn/conf.py
function get_version (line 29) | def get_version():
function builder_inited_handler (line 113) | def builder_inited_handler(app):
function setup (line 117) | def setup(app):
FILE: DLTA_AI_app/mmdetection/mmdet/__init__.py
function digit_version (line 7) | def digit_version(version_str):
FILE: DLTA_AI_app/mmdetection/mmdet/apis/inference.py
function init_detector (line 18) | def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=...
class LoadImage (line 66) | class LoadImage:
method __call__ (line 72) | def __call__(self, results):
function inference_detector (line 99) | def inference_detector(model, imgs):
function async_inference_detector (line 165) | async def async_inference_detector(model, imgs):
function show_result_pyplot (line 222) | def show_result_pyplot(model,
FILE: DLTA_AI_app/mmdetection/mmdet/apis/test.py
function single_gpu_test (line 17) | def single_gpu_test(model,
function multi_gpu_test (line 81) | def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
function collect_results_cpu (line 136) | def collect_results_cpu(result_part, size, tmpdir=None):
function collect_results_gpu (line 179) | def collect_results_gpu(result_part, size):
FILE: DLTA_AI_app/mmdetection/mmdet/apis/train.py
function init_random_seed (line 19) | def init_random_seed(seed=None, device='cuda'):
function set_random_seed (line 52) | def set_random_seed(seed, deterministic=False):
function auto_scale_lr (line 71) | def auto_scale_lr(cfg, distributed, logger):
function train_detector (line 117) | def train_detector(model,
FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/anchor_generator.py
class AnchorGenerator (line 13) | class AnchorGenerator:
method __init__ (line 61) | def __init__(self,
method num_base_anchors (line 116) | def num_base_anchors(self):
method num_base_priors (line 121) | def num_base_priors(self):
method num_levels (line 127) | def num_levels(self):
method gen_base_anchors (line 131) | def gen_base_anchors(self):
method gen_single_level_base_anchors (line 151) | def gen_single_level_base_anchors(self,
method _meshgrid (line 196) | def _meshgrid(self, x, y, row_major=True):
method grid_priors (line 216) | def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'):
method single_level_grid_priors (line 241) | def single_level_grid_priors(self,
method sparse_priors (line 283) | def sparse_priors(self,
method grid_anchors (line 318) | def grid_anchors(self, featmap_sizes, device='cuda'):
method single_level_grid_anchors (line 347) | def single_level_grid_anchors(self,
method valid_flags (line 392) | def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
method single_level_valid_flags (line 419) | def single_level_valid_flags(self,
method __repr__ (line 451) | def __repr__(self):
class SSDAnchorGenerator (line 471) | class SSDAnchorGenerator(AnchorGenerator):
method __init__ (line 492) | def __init__(self,
method gen_base_anchors (line 571) | def gen_base_anchors(self):
method __repr__ (line 592) | def __repr__(self):
class LegacyAnchorGenerator (line 610) | class LegacyAnchorGenerator(AnchorGenerator):
method gen_single_level_base_anchors (line 658) | def gen_single_level_base_anchors(self,
class LegacySSDAnchorGenerator (line 709) | class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator):
method __init__ (line 716) | def __init__(self,
class YOLOAnchorGenerator (line 734) | class YOLOAnchorGenerator(AnchorGenerator):
method __init__ (line 744) | def __init__(self, strides, base_sizes):
method num_levels (line 757) | def num_levels(self):
method gen_base_anchors (line 761) | def gen_base_anchors(self):
method gen_single_level_base_anchors (line 778) | def gen_single_level_base_anchors(self, base_sizes_per_level, center=N...
method responsible_flags (line 806) | def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'):
method single_level_responsible_flags (line 831) | def single_level_responsible_flags(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/builder.py
function build_prior_generator (line 11) | def build_prior_generator(cfg, default_args=None):
function build_anchor_generator (line 15) | def build_anchor_generator(cfg, default_args=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/point_generator.py
class PointGenerator (line 10) | class PointGenerator:
method _meshgrid (line 12) | def _meshgrid(self, x, y, row_major=True):
method grid_points (line 20) | def grid_points(self, featmap_size, stride=16, device='cuda'):
method valid_flags (line 30) | def valid_flags(self, featmap_size, valid_size, device='cuda'):
class MlvlPointGenerator (line 44) | class MlvlPointGenerator:
method __init__ (line 55) | def __init__(self, strides, offset=0.5):
method num_levels (line 60) | def num_levels(self):
method num_base_priors (line 65) | def num_base_priors(self):
method _meshgrid (line 70) | def _meshgrid(self, x, y, row_major=True):
method grid_priors (line 80) | def grid_priors(self,
method single_level_grid_priors (line 119) | def single_level_grid_priors(self,
method valid_flags (line 177) | def valid_flags(self, featmap_sizes, pad_shape, device='cuda'):
method single_level_valid_flags (line 205) | def single_level_valid_flags(self,
method sparse_priors (line 234) | def sparse_priors(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/anchor/utils.py
function images_to_levels (line 5) | def images_to_levels(target, num_levels):
function anchor_inside_flags (line 21) | def anchor_inside_flags(flat_anchors,
function calc_region (line 50) | def calc_region(bbox, ratio, featmap_size=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
class ApproxMaxIoUAssigner (line 10) | class ApproxMaxIoUAssigner(MaxIoUAssigner):
method __init__ (line 40) | def __init__(self,
method assign (line 60) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/assign_result.py
class AssignResult (line 7) | class AssignResult(util_mixins.NiceRepr):
method __init__ (line 43) | def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
method num_preds (line 52) | def num_preds(self):
method set_extra_property (line 56) | def set_extra_property(self, key, value):
method get_extra_property (line 61) | def get_extra_property(self, key):
method info (line 66) | def info(self):
method __nice__ (line 78) | def __nice__(self):
method random (line 98) | def random(cls, **kwargs):
method add_gt_ (line 192) | def add_gt_(self, gt_labels):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/atss_assigner.py
class ATSSAssigner (line 13) | class ATSSAssigner(BaseAssigner):
method __init__ (line 29) | def __init__(self,
method assign (line 52) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/base_assigner.py
class BaseAssigner (line 5) | class BaseAssigner(metaclass=ABCMeta):
method assign (line 9) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/center_region_assigner.py
function scale_boxes (line 10) | def scale_boxes(bboxes, scale):
function is_located_in (line 37) | def is_located_in(points, bboxes):
function bboxes_area (line 55) | def bboxes_area(bboxes):
class CenterRegionAssigner (line 72) | class CenterRegionAssigner(BaseAssigner):
method __init__ (line 94) | def __init__(self,
method get_gt_priorities (line 108) | def get_gt_priorities(self, gt_bboxes):
method assign (line 126) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
method assign_one_hot_gt_indices (line 257) | def assign_one_hot_gt_indices(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/grid_assigner.py
class GridAssigner (line 11) | class GridAssigner(BaseAssigner):
method __init__ (line 31) | def __init__(self,
method assign (line 43) | def assign(self, bboxes, box_responsible_flags, gt_bboxes, gt_labels=N...
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/hungarian_assigner.py
class HungarianAssigner (line 13) | class HungarianAssigner(BaseAssigner):
method __init__ (line 41) | def __init__(self,
method assign (line 49) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/mask_hungarian_assigner.py
class MaskHungarianAssigner (line 12) | class MaskHungarianAssigner(BaseAssigner):
method __init__ (line 33) | def __init__(self,
method assign (line 42) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/max_iou_assigner.py
class MaxIoUAssigner (line 11) | class MaxIoUAssigner(BaseAssigner):
method __init__ (line 46) | def __init__(self,
method assign (line 66) | def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
method assign_wrt_overlaps (line 133) | def assign_wrt_overlaps(self, overlaps, gt_labels=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/point_assigner.py
class PointAssigner (line 10) | class PointAssigner(BaseAssigner):
method __init__ (line 20) | def __init__(self, scale=4, pos_num=3):
method assign (line 24) | def assign(self, points, gt_bboxes, gt_bboxes_ignore=None, gt_labels=N...
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/region_assigner.py
function calc_region (line 10) | def calc_region(bbox, ratio, stride, featmap_size=None):
function anchor_ctr_inside_region_flags (line 27) | def anchor_ctr_inside_region_flags(anchors, stride, region):
class RegionAssigner (line 38) | class RegionAssigner(BaseAssigner):
method __init__ (line 54) | def __init__(self, center_ratio=0.2, ignore_ratio=0.5):
method assign (line 58) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/sim_ota_assigner.py
class SimOTAAssigner (line 14) | class SimOTAAssigner(BaseAssigner):
method __init__ (line 28) | def __init__(self,
method assign (line 38) | def assign(self,
method _assign (line 95) | def _assign(self,
method get_in_gt_and_in_center_info (line 186) | def get_in_gt_and_in_center_info(self, priors, gt_bboxes):
method dynamic_k_matching (line 230) | def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/task_aligned_assigner.py
class TaskAlignedAssigner (line 13) | class TaskAlignedAssigner(BaseAssigner):
method __init__ (line 31) | def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):
method assign (line 36) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/assigners/uniform_assigner.py
class UniformAssigner (line 12) | class UniformAssigner(BaseAssigner):
method __init__ (line 25) | def __init__(self,
method assign (line 35) | def assign(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/builder.py
function build_assigner (line 9) | def build_assigner(cfg, **default_args):
function build_sampler (line 14) | def build_sampler(cfg, **default_args):
function build_bbox_coder (line 19) | def build_bbox_coder(cfg, **default_args):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/base_bbox_coder.py
class BaseBBoxCoder (line 5) | class BaseBBoxCoder(metaclass=ABCMeta):
method __init__ (line 8) | def __init__(self, **kwargs):
method encode (line 12) | def encode(self, bboxes, gt_bboxes):
method decode (line 16) | def decode(self, bboxes, bboxes_pred):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/bucketing_bbox_coder.py
class BucketingBBoxCoder (line 13) | class BucketingBBoxCoder(BaseBBoxCoder):
method __init__ (line 35) | def __init__(self,
method encode (line 50) | def encode(self, bboxes, gt_bboxes):
method decode (line 72) | def decode(self, bboxes, pred_bboxes, max_shape=None):
function generat_buckets (line 96) | def generat_buckets(proposals, num_buckets, scale_factor=1.0):
function bbox2bucket (line 145) | def bbox2bucket(proposals,
function bucket2bbox (line 269) | def bucket2bbox(proposals,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
class DeltaXYWHBBoxCoder (line 13) | class DeltaXYWHBBoxCoder(BaseBBoxCoder):
method __init__ (line 34) | def __init__(self,
method encode (line 47) | def encode(self, bboxes, gt_bboxes):
method decode (line 65) | def decode(self,
function bbox2delta (line 118) | def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., ...
function delta2bbox (line 164) | def delta2bbox(rois,
function onnx_delta2bbox (line 263) | def onnx_delta2bbox(rois,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/distance_point_bbox_coder.py
class DistancePointBBoxCoder (line 8) | class DistancePointBBoxCoder(BaseBBoxCoder):
method __init__ (line 19) | def __init__(self, clip_border=True):
method encode (line 23) | def encode(self, points, gt_bboxes, max_dis=None, eps=0.1):
method decode (line 41) | def decode(self, points, pred_bboxes, max_shape=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
class LegacyDeltaXYWHBBoxCoder (line 11) | class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder):
method __init__ (line 34) | def __init__(self,
method encode (line 41) | def encode(self, bboxes, gt_bboxes):
method decode (line 59) | def decode(self,
function legacy_bbox2delta (line 85) | def legacy_bbox2delta(proposals,
function legacy_delta2bbox (line 134) | def legacy_delta2bbox(rois,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/pseudo_bbox_coder.py
class PseudoBBoxCoder (line 7) | class PseudoBBoxCoder(BaseBBoxCoder):
method __init__ (line 10) | def __init__(self, **kwargs):
method encode (line 13) | def encode(self, bboxes, gt_bboxes):
method decode (line 17) | def decode(self, bboxes, pred_bboxes):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/tblr_bbox_coder.py
class TBLRBBoxCoder (line 10) | class TBLRBBoxCoder(BaseBBoxCoder):
method __init__ (line 26) | def __init__(self, normalizer=4.0, clip_border=True):
method encode (line 31) | def encode(self, bboxes, gt_bboxes):
method decode (line 50) | def decode(self, bboxes, pred_bboxes, max_shape=None):
function bboxes2tblr (line 77) | def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True):
function tblr2bboxes (line 124) | def tblr2bboxes(priors,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/coder/yolo_bbox_coder.py
class YOLOBBoxCoder (line 10) | class YOLOBBoxCoder(BaseBBoxCoder):
method __init__ (line 22) | def __init__(self, eps=1e-6):
method encode (line 27) | def encode(self, bboxes, gt_bboxes, stride):
method decode (line 62) | def decode(self, bboxes, pred_bboxes, stride):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/demodata.py
function random_boxes (line 8) | def random_boxes(num=1, scale=1, rng=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/iou_calculators/builder.py
function build_iou_calculator (line 7) | def build_iou_calculator(cfg, default_args=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
function cast_tensor_type (line 7) | def cast_tensor_type(x, scale=1., dtype=None):
function fp16_clamp (line 14) | def fp16_clamp(x, min=None, max=None):
class BboxOverlaps2D (line 23) | class BboxOverlaps2D:
method __init__ (line 26) | def __init__(self, scale=1., dtype=None):
method __call__ (line 30) | def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
method __repr__ (line 67) | def __repr__(self):
function bbox_overlaps (line 74) | def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e...
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/match_costs/builder.py
function build_match_cost (line 7) | def build_match_cost(cfg, default_args=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/match_costs/match_cost.py
class BBoxL1Cost (line 11) | class BBoxL1Cost:
method __init__ (line 29) | def __init__(self, weight=1., box_format='xyxy'):
method __call__ (line 34) | def __call__(self, bbox_pred, gt_bboxes):
class FocalLossCost (line 55) | class FocalLossCost:
method __init__ (line 80) | def __init__(self,
method _focal_loss_cost (line 92) | def _focal_loss_cost(self, cls_pred, gt_labels):
method _mask_focal_loss_cost (line 111) | def _mask_focal_loss_cost(self, cls_pred, gt_labels):
method __call__ (line 136) | def __call__(self, cls_pred, gt_labels):
class ClassificationCost (line 153) | class ClassificationCost:
method __init__ (line 174) | def __init__(self, weight=1.):
method __call__ (line 177) | def __call__(self, cls_pred, gt_labels):
class IoUCost (line 197) | class IoUCost:
method __init__ (line 215) | def __init__(self, iou_mode='giou', weight=1.):
method __call__ (line 219) | def __call__(self, bboxes, gt_bboxes):
class DiceCost (line 239) | class DiceCost:
method __init__ (line 254) | def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True):
method binary_mask_dice_loss (line 260) | def binary_mask_dice_loss(self, mask_preds, gt_masks):
method __call__ (line 283) | def __call__(self, mask_preds, gt_masks):
class CrossEntropyLossCost (line 299) | class CrossEntropyLossCost:
method __init__ (line 315) | def __init__(self, weight=1., use_sigmoid=True):
method _binary_cross_entropy (line 320) | def _binary_cross_entropy(self, cls_pred, gt_labels):
method __call__ (line 344) | def __call__(self, cls_pred, gt_labels):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/base_sampler.py
class BaseSampler (line 9) | class BaseSampler(metaclass=ABCMeta):
method __init__ (line 12) | def __init__(self,
method _sample_pos (line 26) | def _sample_pos(self, assign_result, num_expected, **kwargs):
method _sample_neg (line 31) | def _sample_neg(self, assign_result, num_expected, **kwargs):
method sample (line 35) | def sample(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/combined_sampler.py
class CombinedSampler (line 7) | class CombinedSampler(BaseSampler):
method __init__ (line 10) | def __init__(self, pos_sampler, neg_sampler, **kwargs):
method _sample_pos (line 15) | def _sample_pos(self, **kwargs):
method _sample_neg (line 19) | def _sample_neg(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
class InstanceBalancedPosSampler (line 10) | class InstanceBalancedPosSampler(RandomSampler):
method _sample_pos (line 14) | def _sample_pos(self, assign_result, num_expected, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
class IoUBalancedNegSampler (line 10) | class IoUBalancedNegSampler(RandomSampler):
method __init__ (line 30) | def __init__(self,
method sample_via_interval (line 47) | def sample_via_interval(self, max_overlaps, full_set, num_expected):
method _sample_neg (line 89) | def _sample_neg(self, assign_result, num_expected, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/mask_pseudo_sampler.py
class MaskPseudoSampler (line 13) | class MaskPseudoSampler(BaseSampler):
method __init__ (line 16) | def __init__(self, **kwargs):
method _sample_pos (line 19) | def _sample_pos(self, **kwargs):
method _sample_neg (line 23) | def _sample_neg(self, **kwargs):
method sample (line 27) | def sample(self, assign_result, masks, gt_masks, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/mask_sampling_result.py
class MaskSamplingResult (line 10) | class MaskSamplingResult(SamplingResult):
method __init__ (line 13) | def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result,
method masks (line 37) | def masks(self):
method __nice__ (line 41) | def __nice__(self):
method info (line 50) | def info(self):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/ohem_sampler.py
class OHEMSampler (line 10) | class OHEMSampler(BaseSampler):
method __init__ (line 16) | def __init__(self,
method hard_mining (line 34) | def hard_mining(self, inds, num_expected, bboxes, labels, feats):
method _sample_pos (line 55) | def _sample_pos(self,
method _sample_neg (line 83) | def _sample_neg(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/pseudo_sampler.py
class PseudoSampler (line 10) | class PseudoSampler(BaseSampler):
method __init__ (line 13) | def __init__(self, **kwargs):
method _sample_pos (line 16) | def _sample_pos(self, **kwargs):
method _sample_neg (line 20) | def _sample_neg(self, **kwargs):
method sample (line 24) | def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/random_sampler.py
class RandomSampler (line 9) | class RandomSampler(BaseSampler):
method __init__ (line 21) | def __init__(self,
method random_choice (line 32) | def random_choice(self, gallery, num):
method _sample_pos (line 64) | def _sample_pos(self, assign_result, num_expected, **kwargs):
method _sample_neg (line 74) | def _sample_neg(self, assign_result, num_expected, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/sampling_result.py
class SamplingResult (line 7) | class SamplingResult(util_mixins.NiceRepr):
method __init__ (line 26) | def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result,
method bboxes (line 53) | def bboxes(self):
method to (line 57) | def to(self, device):
method __nice__ (line 72) | def __nice__(self):
method info (line 81) | def info(self):
method random (line 94) | def random(cls, rng=None, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/samplers/score_hlr_sampler.py
class ScoreHLRSampler (line 12) | class ScoreHLRSampler(BaseSampler):
method __init__ (line 33) | def __init__(self,
method random_choice (line 63) | def random_choice(gallery, num):
method _sample_pos (line 92) | def _sample_pos(self, assign_result, num_expected, **kwargs):
method _sample_neg (line 100) | def _sample_neg(self,
method sample (line 216) | def sample(self,
FILE: DLTA_AI_app/mmdetection/mmdet/core/bbox/transforms.py
function find_inside_bboxes (line 6) | def find_inside_bboxes(bboxes, img_h, img_w):
function bbox_flip (line 22) | def bbox_flip(bboxes, img_shape, direction='horizontal'):
function bbox_mapping (line 51) | def bbox_mapping(bboxes,
function bbox_mapping_back (line 63) | def bbox_mapping_back(bboxes,
function bbox2roi (line 75) | def bbox2roi(bbox_list):
function roi2bbox (line 97) | def roi2bbox(rois):
function bbox2result (line 116) | def bbox2result(bboxes, labels, num_classes):
function distance2bbox (line 136) | def distance2bbox(points, distance, max_shape=None):
function bbox2distance (line 189) | def bbox2distance(points, bbox, max_dis=None, eps=0.1):
function bbox_rescale (line 213) | def bbox_rescale(bboxes, scale_factor=1.0):
function bbox_cxcywh_to_xyxy (line 245) | def bbox_cxcywh_to_xyxy(bbox):
function bbox_xyxy_to_cxcywh (line 259) | def bbox_xyxy_to_cxcywh(bbox):
FILE: DLTA_AI_app/mmdetection/mmdet/core/data_structures/general_data.py
class GeneralData (line 10) | class GeneralData(NiceRepr):
method __init__ (line 87) | def __init__(self, meta_info=None, data=None):
method set_meta_info (line 97) | def set_meta_info(self, meta_info):
method set_data (line 129) | def set_data(self, data):
method new (line 141) | def new(self, meta_info=None, data=None):
method keys (line 159) | def keys(self):
method meta_info_keys (line 166) | def meta_info_keys(self):
method values (line 173) | def values(self):
method meta_info_values (line 180) | def meta_info_values(self):
method items (line 187) | def items(self):
method meta_info_items (line 191) | def meta_info_items(self):
method __setattr__ (line 195) | def __setattr__(self, name, val):
method __delattr__ (line 211) | def __delattr__(self, item):
method __getitem__ (line 228) | def __getitem__(self, name):
method get (line 231) | def get(self, *args):
method pop (line 235) | def pop(self, *args):
method __contains__ (line 252) | def __contains__(self, item):
method to (line 257) | def to(self, *args, **kwargs):
method cpu (line 267) | def cpu(self):
method npu (line 277) | def npu(self):
method mlu (line 287) | def mlu(self):
method cuda (line 297) | def cuda(self):
method detach (line 307) | def detach(self):
method numpy (line 317) | def numpy(self):
method __nice__ (line 326) | def __nice__(self):
FILE: DLTA_AI_app/mmdetection/mmdet/core/data_structures/instance_data.py
class InstanceData (line 10) | class InstanceData(GeneralData):
method __setattr__ (line 66) | def __setattr__(self, name, value):
method __getitem__ (line 90) | def __getitem__(self, item):
method cat (line 151) | def cat(instances_list):
method __len__ (line 183) | def __len__(self):
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/bbox_overlaps.py
function bbox_overlaps (line 5) | def bbox_overlaps(bboxes1,
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/class_names.py
function wider_face_classes (line 5) | def wider_face_classes():
function voc_classes (line 9) | def voc_classes():
function imagenet_det_classes (line 17) | def imagenet_det_classes():
function imagenet_vid_classes (line 58) | def imagenet_vid_classes():
function coco_classes (line 68) | def coco_classes():
function cityscapes_classes (line 86) | def cityscapes_classes():
function oid_challenge_classes (line 93) | def oid_challenge_classes():
function oid_v6_classes (line 188) | def oid_v6_classes():
function get_classes (line 318) | def get_classes(dataset):
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/eval_hooks.py
function _calc_dynamic_intervals (line 12) | def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
class EvalHook (line 24) | class EvalHook(BaseEvalHook):
method __init__ (line 26) | def __init__(self, *args, dynamic_intervals=None, **kwargs):
method _decide_interval (line 35) | def _decide_interval(self, runner):
method before_train_epoch (line 42) | def before_train_epoch(self, runner):
method before_train_iter (line 47) | def before_train_iter(self, runner):
method _do_evaluate (line 51) | def _do_evaluate(self, runner):
class DistEvalHook (line 73) | class DistEvalHook(BaseDistEvalHook):
method __init__ (line 75) | def __init__(self, *args, dynamic_intervals=None, **kwargs):
method _decide_interval (line 84) | def _decide_interval(self, runner):
method before_train_epoch (line 91) | def before_train_epoch(self, runner):
method before_train_iter (line 96) | def before_train_iter(self, runner):
method _do_evaluate (line 100) | def _do_evaluate(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/mean_ap.py
function average_precision (line 13) | def average_precision(recalls, precisions, mode='area'):
function tpfp_imagenet (line 60) | def tpfp_imagenet(det_bboxes,
function tpfp_default (line 169) | def tpfp_default(det_bboxes,
function tpfp_openimages (line 272) | def tpfp_openimages(det_bboxes,
function get_cls_results (line 477) | def get_cls_results(det_results, annotations, class_id):
function get_cls_group_ofs (line 504) | def get_cls_group_ofs(annotations, class_id):
function eval_map (line 525) | def eval_map(det_results,
function print_map_summary (line 717) | def print_map_summary(mean_ap,
FILE: DLTA_AI_app/mmdetection/mmdet/core/evaluation/recall.py
function _recalls (line 11) | def _recalls(all_ious, proposal_nums, thrs):
function set_recall_param (line 44) | def set_recall_param(proposal_nums, iou_thrs):
function eval_recalls (line 65) | def eval_recalls(gts,
function print_recall_summary (line 117) | def print_recall_summary(recalls,
function plot_num_recall (line 150) | def plot_num_recall(recalls, proposal_nums):
function plot_iou_recall (line 175) | def plot_iou_recall(recalls, iou_thrs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/export/model_wrappers.py
class DeployBaseDetector (line 12) | class DeployBaseDetector(BaseDetector):
method __init__ (line 15) | def __init__(self, class_names, device_id):
method simple_test (line 20) | def simple_test(self, img, img_metas, **kwargs):
method aug_test (line 23) | def aug_test(self, imgs, img_metas, **kwargs):
method extract_feat (line 26) | def extract_feat(self, imgs):
method forward_train (line 29) | def forward_train(self, imgs, img_metas, **kwargs):
method val_step (line 32) | def val_step(self, data, optimizer):
method train_step (line 35) | def train_step(self, data, optimizer):
method forward_test (line 38) | def forward_test(self, *, img, img_metas, **kwargs):
method async_simple_test (line 41) | def async_simple_test(self, img, img_metas, **kwargs):
method forward (line 44) | def forward(self, img, img_metas, return_loss=True, **kwargs):
class ONNXRuntimeDetector (line 96) | class ONNXRuntimeDetector(DeployBaseDetector):
method __init__ (line 99) | def __init__(self, onnx_file, class_names, device_id):
method forward_test (line 130) | def forward_test(self, imgs, img_metas, **kwargs):
class TensorRTDetector (line 152) | class TensorRTDetector(DeployBaseDetector):
method __init__ (line 155) | def __init__(self, engine_file, class_names, device_id, output_names=N...
method forward_test (line 177) | def forward_test(self, imgs, img_metas, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/export/onnx_helper.py
function dynamic_clip_for_onnx (line 7) | def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):
function get_k_for_topk (line 46) | def get_k_for_topk(k, size):
function add_dummy_nms_for_onnx (line 82) | def add_dummy_nms_for_onnx(boxes,
class DummyONNXNMSop (line 201) | class DummyONNXNMSop(torch.autograd.Function):
method forward (line 208) | def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_thresh...
method symbolic (line 214) | def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold,
FILE: DLTA_AI_app/mmdetection/mmdet/core/export/pytorch2onnx.py
function generate_inputs_and_wrap_model (line 10) | def generate_inputs_and_wrap_model(config_path,
function build_model_from_cfg (line 65) | def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
function preprocess_example_input (line 102) | def preprocess_example_input(input_config):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/checkloss_hook.py
class CheckInvalidLossHook (line 7) | class CheckInvalidLossHook(Hook):
method __init__ (line 18) | def __init__(self, interval=50):
method after_train_iter (line 21) | def after_train_iter(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/ema.py
class BaseEMAHook (line 8) | class BaseEMAHook(Hook):
method __init__ (line 32) | def __init__(self,
method before_run (line 45) | def before_run(self, runner):
method get_momentum (line 67) | def get_momentum(self, runner):
method after_train_iter (line 71) | def after_train_iter(self, runner):
method after_train_epoch (line 84) | def after_train_epoch(self, runner):
method before_train_epoch (line 89) | def before_train_epoch(self, runner):
method _swap_ema_parameters (line 94) | def _swap_ema_parameters(self):
class ExpMomentumEMAHook (line 104) | class ExpMomentumEMAHook(BaseEMAHook):
method __init__ (line 112) | def __init__(self, total_iter=2000, **kwargs):
class LinearMomentumEMAHook (line 119) | class LinearMomentumEMAHook(BaseEMAHook):
method __init__ (line 127) | def __init__(self, warm_up=100, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/memory_profiler_hook.py
class MemoryProfilerHook (line 6) | class MemoryProfilerHook(Hook):
method __init__ (line 15) | def __init__(self, interval=50):
method after_iter (line 34) | def after_iter(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/set_epoch_info_hook.py
class SetEpochInfoHook (line 7) | class SetEpochInfoHook(Hook):
method before_train_epoch (line 10) | def before_train_epoch(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/sync_norm_hook.py
function get_norm_states (line 11) | def get_norm_states(module):
class SyncNormHook (line 21) | class SyncNormHook(Hook):
method __init__ (line 30) | def __init__(self, num_last_epochs=15, interval=1):
method before_train_epoch (line 34) | def before_train_epoch(self, runner):
method after_train_epoch (line 40) | def after_train_epoch(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/sync_random_size_hook.py
class SyncRandomSizeHook (line 12) | class SyncRandomSizeHook(Hook):
method __init__ (line 33) | def __init__(self,
method after_train_epoch (line 52) | def after_train_epoch(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/wandblogger_hook.py
class MMDetWandbHook (line 21) | class MMDetWandbHook(WandbLoggerHook):
method __init__ (line 95) | def __init__(self,
method import_wandb (line 114) | def import_wandb(self):
method before_run (line 134) | def before_run(self, runner):
method after_train_epoch (line 206) | def after_train_epoch(self, runner):
method after_train_iter (line 239) | def after_train_iter(self, runner):
method after_run (line 279) | def after_run(self, runner):
method _update_wandb_config (line 282) | def _update_wandb_config(self, runner):
method _log_ckpt_as_artifact (line 294) | def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None):
method _get_eval_results (line 307) | def _get_eval_results(self):
method _init_data_table (line 314) | def _init_data_table(self):
method _init_pred_table (line 319) | def _init_pred_table(self):
method _add_ground_truth (line 324) | def _add_ground_truth(self, runner):
method _log_predictions (line 399) | def _log_predictions(self, results):
method _get_wandb_bboxes (line 459) | def _get_wandb_bboxes(self, bboxes, labels, log_gt=True):
method _get_wandb_masks (line 511) | def _get_wandb_masks(self,
method _log_data_table (line 559) | def _log_data_table(self):
method _log_eval_table (line 576) | def _log_eval_table(self, idx):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/yolox_lrupdater_hook.py
class YOLOXLrUpdaterHook (line 8) | class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook):
method __init__ (line 23) | def __init__(self, num_last_epochs, **kwargs):
method get_warmup_lr (line 27) | def get_warmup_lr(self, cur_iters):
method get_lr (line 44) | def get_lr(self, runner, base_lr):
FILE: DLTA_AI_app/mmdetection/mmdet/core/hook/yolox_mode_switch_hook.py
class YOLOXModeSwitchHook (line 7) | class YOLOXModeSwitchHook(Hook):
method __init__ (line 21) | def __init__(self,
method before_train_epoch (line 28) | def before_train_epoch(self, runner):
FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/mask_target.py
function mask_target (line 7) | def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_...
function mask_target_single (line 67) | def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg):
FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/structures.py
class BaseInstanceMasks (line 12) | class BaseInstanceMasks(metaclass=ABCMeta):
method rescale (line 16) | def rescale(self, scale, interpolation='nearest'):
method resize (line 29) | def resize(self, out_shape, interpolation='nearest'):
method flip (line 41) | def flip(self, flip_direction='horizontal'):
method pad (line 52) | def pad(self, out_shape, pad_val):
method crop (line 64) | def crop(self, bbox):
method crop_and_resize (line 75) | def crop_and_resize(self,
method expand (line 104) | def expand(self, expanded_h, expanded_w, top, left):
method areas (line 109) | def areas(self):
method to_ndarray (line 113) | def to_ndarray(self):
method to_tensor (line 121) | def to_tensor(self, dtype, device):
method translate (line 133) | def translate(self,
method shear (line 153) | def shear(self,
method rotate (line 175) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
class BitmapMasks (line 193) | class BitmapMasks(BaseInstanceMasks):
method __init__ (line 222) | def __init__(self, masks, height, width):
method __getitem__ (line 239) | def __getitem__(self, index):
method __iter__ (line 251) | def __iter__(self):
method __repr__ (line 254) | def __repr__(self):
method __len__ (line 261) | def __len__(self):
method rescale (line 265) | def rescale(self, scale, interpolation='nearest'):
method resize (line 278) | def resize(self, out_shape, interpolation='nearest'):
method flip (line 290) | def flip(self, flip_direction='horizontal'):
method pad (line 303) | def pad(self, out_shape, pad_val=0):
method crop (line 314) | def crop(self, bbox):
method crop_and_resize (line 333) | def crop_and_resize(self,
method expand (line 369) | def expand(self, expanded_h, expanded_w, top, left):
method translate (line 381) | def translate(self,
method shear (line 431) | def shear(self,
method rotate (line 466) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
method areas (line 499) | def areas(self):
method to_ndarray (line 503) | def to_ndarray(self):
method to_tensor (line 507) | def to_tensor(self, dtype, device):
method random (line 512) | def random(cls,
method get_bboxes (line 532) | def get_bboxes(self):
class PolygonMasks (line 548) | class PolygonMasks(BaseInstanceMasks):
method __init__ (line 588) | def __init__(self, masks, height, width):
method __getitem__ (line 598) | def __getitem__(self, index):
method __iter__ (line 621) | def __iter__(self):
method __repr__ (line 624) | def __repr__(self):
method __len__ (line 631) | def __len__(self):
method rescale (line 635) | def rescale(self, scale, interpolation=None):
method resize (line 644) | def resize(self, out_shape, interpolation=None):
method flip (line 663) | def flip(self, flip_direction='horizontal'):
method crop (line 687) | def crop(self, bbox):
method pad (line 716) | def pad(self, out_shape, pad_val=0):
method expand (line 720) | def expand(self, *args, **kwargs):
method crop_and_resize (line 724) | def crop_and_resize(self,
method translate (line 765) | def translate(self,
method shear (line 799) | def shear(self,
method rotate (line 831) | def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
method to_bitmap (line 860) | def to_bitmap(self):
method areas (line 866) | def areas(self):
method _polygon_area (line 884) | def _polygon_area(self, x, y):
method to_ndarray (line 900) | def to_ndarray(self):
method to_tensor (line 910) | def to_tensor(self, dtype, device):
method random (line 920) | def random(cls,
method get_bboxes (line 1039) | def get_bboxes(self):
function polygon_to_bitmap (line 1058) | def polygon_to_bitmap(polygons, height, width):
function bitmap_to_polygon (line 1075) | def bitmap_to_polygon(bitmap):
FILE: DLTA_AI_app/mmdetection/mmdet/core/mask/utils.py
function split_combined_polys (line 8) | def split_combined_polys(polys, poly_lens, polys_per_mask):
function encode_mask_results (line 38) | def encode_mask_results(mask_results):
function mask2bbox (line 68) | def mask2bbox(masks):
FILE: DLTA_AI_app/mmdetection/mmdet/core/optimizers/builder.py
function build_optimizer_constructor (line 11) | def build_optimizer_constructor(cfg):
function build_optimizer (line 22) | def build_optimizer(model, cfg):
FILE: DLTA_AI_app/mmdetection/mmdet/core/optimizers/layer_decay_optimizer_constructor.py
function get_layer_id_for_convnext (line 10) | def get_layer_id_for_convnext(var_name, max_layer_id):
function get_stage_id_for_convnext (line 53) | def get_stage_id_for_convnext(var_name, max_stage_id):
class LearningRateDecayOptimizerConstructor (line 79) | class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor):
method add_params (line 83) | def add_params(self, params, module, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/bbox_nms.py
function multiclass_nms (line 8) | def multiclass_nms(multi_bboxes,
function fast_nms (line 98) | def fast_nms(multi_bboxes,
FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/matrix_nms.py
function mask_matrix_nms (line 5) | def mask_matrix_nms(masks,
FILE: DLTA_AI_app/mmdetection/mmdet/core/post_processing/merge_augs.py
function merge_aug_proposals (line 13) | def merge_aug_proposals(aug_proposals, img_metas, cfg):
function merge_aug_bboxes (line 84) | def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg):
function merge_aug_scores (line 113) | def merge_aug_scores(aug_scores):
function merge_aug_masks (line 121) | def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None):
FILE: DLTA_AI_app/mmdetection/mmdet/core/utils/dist_utils.py
function _allreduce_coalesced (line 15) | def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1):
function allreduce_grads (line 37) | def allreduce_grads(params, coalesce=True, bucket_size_mb=-1):
class DistOptimizerHook (line 59) | class DistOptimizerHook(OptimizerHook):
method __init__ (line 62) | def __init__(self, *args, **kwargs):
function reduce_mean (line 68) | def reduce_mean(tensor):
function obj2tensor (line 77) | def obj2tensor(pyobj, device='cuda'):
function tensor2obj (line 83) | def tensor2obj(tensor):
function _get_global_gloo_group (line 89) | def _get_global_gloo_group():
function all_reduce_dict (line 98) | def all_reduce_dict(py_dict, op='sum', group=None, to_float=True):
function sync_random_seed (line 157) | def sync_random_seed(seed=None, device='cuda'):
FILE: DLTA_AI_app/mmdetection/mmdet/core/utils/misc.py
function multi_apply (line 11) | def multi_apply(func, *args, **kwargs):
function unmap (line 33) | def unmap(data, count, inds, fill=0):
function mask2ndarray (line 46) | def mask2ndarray(mask):
function flip_tensor (line 65) | def flip_tensor(src_tensor, flip_direction):
function select_single_mlvl (line 88) | def select_single_mlvl(mlvl_tensors, batch_id, detach=True):
function filter_scores_and_topk (line 119) | def filter_scores_and_topk(scores, score_thr, topk, results=None):
function center_of_mass (line 168) | def center_of_mass(mask, esp=1e-6):
function generate_coordinate (line 190) | def generate_coordinate(featmap_sizes, device='cuda'):
FILE: DLTA_AI_app/mmdetection/mmdet/core/visualization/image.py
function color_val_matplotlib (line 25) | def color_val_matplotlib(color):
function _get_adaptive_scales (line 40) | def _get_adaptive_scales(areas, min_area=800, max_area=30000):
function _get_bias_color (line 63) | def _get_bias_color(base, max_dist=30):
function draw_bboxes (line 81) | def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
function draw_labels (line 114) | def draw_labels(ax,
function draw_masks (line 166) | def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
function imshow_det_bboxes (line 208) | def imshow_det_bboxes(img,
function imshow_gt_det_bboxes (line 380) | def imshow_gt_det_bboxes(img,
FILE: DLTA_AI_app/mmdetection/mmdet/core/visualization/palette.py
function palette_val (line 6) | def palette_val(palette):
function get_palette (line 22) | def get_palette(palette, num_classes):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/csp_darknet.py
class Focus (line 14) | class Focus(nn.Module):
method __init__ (line 30) | def __init__(self,
method forward (line 49) | def forward(self, x):
class SPPBottleneck (line 67) | class SPPBottleneck(BaseModule):
method __init__ (line 85) | def __init__(self,
method forward (line 116) | def forward(self, x):
class CSPDarknet (line 124) | class CSPDarknet(BaseModule):
method __init__ (line 177) | def __init__(self,
method _freeze_stages (line 261) | def _freeze_stages(self):
method train (line 269) | def train(self, mode=True):
method forward (line 277) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/darknet.py
class ResBlock (line 14) | class ResBlock(BaseModule):
method __init__ (line 33) | def __init__(self,
method forward (line 50) | def forward(self, x):
class Darknet (line 60) | class Darknet(BaseModule):
method __init__ (line 101) | def __init__(self,
method forward (line 153) | def forward(self, x):
method _freeze_stages (line 163) | def _freeze_stages(self):
method train (line 171) | def train(self, mode=True):
method make_conv_res_block (line 180) | def make_conv_res_block(in_channels,
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/detectors_resnet.py
class Bottleneck (line 16) | class Bottleneck(_Bottleneck):
method __init__ (line 36) | def __init__(self,
method rfp_forward (line 73) | def rfp_forward(self, x, rfp_feat):
class ResLayer (line 120) | class ResLayer(Sequential):
method __init__ (line 146) | def __init__(self,
class DetectoRS_ResNet (line 212) | class DetectoRS_ResNet(ResNet):
method __init__ (line 234) | def __init__(self,
method init_weights (line 296) | def init_weights(self):
method make_res_layer (line 325) | def make_res_layer(self, **kwargs):
method forward (line 329) | def forward(self, x):
method rfp_forward (line 336) | def rfp_forward(self, x, rfp_feats):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/detectors_resnext.py
class Bottleneck (line 11) | class Bottleneck(_Bottleneck):
method __init__ (line 14) | def __init__(self,
class DetectoRS_ResNeXt (line 99) | class DetectoRS_ResNeXt(DetectoRS_ResNet):
method __init__ (line 113) | def __init__(self, groups=1, base_width=4, **kwargs):
method make_res_layer (line 118) | def make_res_layer(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/efficientnet.py
class EdgeResidual (line 16) | class EdgeResidual(BaseModule):
method __init__ (line 41) | def __init__(self,
method forward (line 91) | def forward(self, x):
function model_scaling (line 115) | def model_scaling(layer_setting, arch_setting):
class EfficientNet (line 159) | class EfficientNet(BaseModule):
method __init__ (line 254) | def __init__(self,
method make_layer (line 327) | def make_layer(self):
method forward (line 395) | def forward(self, x):
method _freeze_stages (line 404) | def _freeze_stages(self):
method train (line 411) | def train(self, mode=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/hourglass.py
class HourglassModule (line 12) | class HourglassModule(BaseModule):
method __init__ (line 30) | def __init__(self,
method forward (line 80) | def forward(self, x):
class HourglassNet (line 97) | class HourglassNet(BaseModule):
method __init__ (line 131) | def __init__(self,
method init_weights (line 195) | def init_weights(self):
method forward (line 203) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/hrnet.py
class HRModule (line 13) | class HRModule(BaseModule):
method __init__ (line 20) | def __init__(self,
method _check_branches (line 49) | def _check_branches(self, num_branches, num_blocks, in_channels,
method _make_one_branch (line 66) | def _make_one_branch(self,
method _make_branches (line 112) | def _make_branches(self, num_branches, block, num_blocks, num_channels):
method _make_fuse_layers (line 121) | def _make_fuse_layers(self):
method forward (line 183) | def forward(self, x):
class HRNet (line 204) | class HRNet(BaseModule):
method __init__ (line 281) | def __init__(self,
method norm1 (line 403) | def norm1(self):
method norm2 (line 408) | def norm2(self):
method _make_transition_layer (line 412) | def _make_transition_layer(self, num_channels_pre_layer,
method _make_layer (line 458) | def _make_layer(self, block, inplanes, planes, blocks, stride=1):
method _make_stage (line 505) | def _make_stage(self, layer_config, in_channels, multiscale_output=True):
method forward (line 545) | def forward(self, x):
method train (line 581) | def train(self, mode=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/mobilenet_v2.py
class MobileNetV2 (line 14) | class MobileNetV2(BaseModule):
method __init__ (line 46) | def __init__(self,
method make_layer (line 138) | def make_layer(self, out_channels, num_blocks, stride, expand_ratio):
method _freeze_stages (line 167) | def _freeze_stages(self):
method forward (line 177) | def forward(self, x):
method train (line 188) | def train(self, mode=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/pvt.py
class MixFFN (line 23) | class MixFFN(BaseModule):
method __init__ (line 47) | def __init__(self,
method forward (line 93) | def forward(self, x, hw_shape, identity=None):
class SpatialReductionAttention (line 102) | class SpatialReductionAttention(MultiheadAttention):
method __init__ (line 129) | def __init__(self,
method forward (line 169) | def forward(self, x, hw_shape, identity=None):
method legacy_forward (line 200) | def legacy_forward(self, x, hw_shape, identity=None):
class PVTEncoderLayer (line 219) | class PVTEncoderLayer(BaseModule):
method __init__ (line 245) | def __init__(self,
method forward (line 284) | def forward(self, x, hw_shape):
class AbsolutePositionEmbedding (line 291) | class AbsolutePositionEmbedding(BaseModule):
method __init__ (line 301) | def __init__(self, pos_shape, pos_dim, drop_rate=0., init_cfg=None):
method init_weights (line 319) | def init_weights(self):
method resize_pos_embed (line 322) | def resize_pos_embed(self, pos_embed, input_shape, mode='bilinear'):
method forward (line 351) | def forward(self, x, hw_shape, mode='bilinear'):
class PyramidVisionTransformer (line 357) | class PyramidVisionTransformer(BaseModule):
method __init__ (line 410) | def __init__(self,
method init_weights (line 523) | def init_weights(self):
method forward (line 563) | def forward(self, x):
class PyramidVisionTransformerV2 (line 580) | class PyramidVisionTransformerV2(PyramidVisionTransformer):
method __init__ (line 584) | def __init__(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/regnet.py
class RegNet (line 14) | class RegNet(ResNet):
method __init__ (line 90) | def __init__(self,
method _make_stem_layer (line 238) | def _make_stem_layer(self, in_channels, base_channels):
method generate_regnet (line 252) | def generate_regnet(self,
method quantize_float (line 285) | def quantize_float(number, divisor):
method adjust_width_group (line 297) | def adjust_width_group(self, widths, bottleneck_ratio, groups):
method get_stages_from_blocks (line 322) | def get_stages_from_blocks(self, widths):
method forward (line 344) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/res2net.py
class Bottle2neck (line 15) | class Bottle2neck(_Bottleneck):
method __init__ (line 18) | def __init__(self,
method forward (line 106) | def forward(self, x):
class Res2Layer (line 163) | class Res2Layer(Sequential):
method __init__ (line 182) | def __init__(self,
class Res2Net (line 243) | class Res2Net(ResNet):
method __init__ (line 303) | def __init__(self,
method make_res_layer (line 322) | def make_res_layer(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnest.py
class RSoftmax (line 17) | class RSoftmax(nn.Module):
method __init__ (line 25) | def __init__(self, radix, groups):
method forward (line 30) | def forward(self, x):
class SplitAttentionConv2d (line 41) | class SplitAttentionConv2d(BaseModule):
method __init__ (line 64) | def __init__(self,
method norm0 (line 116) | def norm0(self):
method norm1 (line 121) | def norm1(self):
method forward (line 125) | def forward(self, x):
class Bottleneck (line 154) | class Bottleneck(_Bottleneck):
method __init__ (line 173) | def __init__(self,
method forward (line 234) | def forward(self, x):
class ResNeSt (line 278) | class ResNeSt(ResNetV1d):
method __init__ (line 299) | def __init__(self,
method make_res_layer (line 313) | def make_res_layer(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnet.py
class BasicBlock (line 14) | class BasicBlock(BaseModule):
method __init__ (line 17) | def __init__(self,
method norm1 (line 58) | def norm1(self):
method norm2 (line 63) | def norm2(self):
method forward (line 67) | def forward(self, x):
class Bottleneck (line 97) | class Bottleneck(BaseModule):
method __init__ (line 100) | def __init__(self,
method make_block_plugins (line 219) | def make_block_plugins(self, in_channels, plugins):
method forward_plugin (line 242) | def forward_plugin(self, x, plugin_names):
method norm1 (line 249) | def norm1(self):
method norm2 (line 254) | def norm2(self):
method norm3 (line 259) | def norm3(self):
method forward (line 263) | def forward(self, x):
class ResNet (line 306) | class ResNet(BaseModule):
method __init__ (line 369) | def __init__(self,
method make_stage_plugins (line 494) | def make_stage_plugins(self, plugins, stage_idx):
method make_res_layer (line 556) | def make_res_layer(self, **kwargs):
method norm1 (line 561) | def norm1(self):
method _make_stem_layer (line 565) | def _make_stem_layer(self, in_channels, stem_channels):
method _freeze_stages (line 613) | def _freeze_stages(self):
method forward (line 631) | def forward(self, x):
method train (line 648) | def train(self, mode=True):
class ResNetV1d (line 661) | class ResNetV1d(ResNet):
method __init__ (line 670) | def __init__(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/resnext.py
class Bottleneck (line 12) | class Bottleneck(_Bottleneck):
method __init__ (line 15) | def __init__(self,
method _del_block_plugins (line 98) | def _del_block_plugins(self, plugin_names):
class ResNeXt (line 110) | class ResNeXt(ResNet):
method __init__ (line 143) | def __init__(self, groups=1, base_width=4, **kwargs):
method make_res_layer (line 148) | def make_res_layer(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/ssd_vgg.py
class SSDVGG (line 13) | class SSDVGG(VGG, BaseModule):
method __init__ (line 50) | def __init__(self,
method init_weights (line 105) | def init_weights(self, pretrained=None):
method forward (line 108) | def forward(self, x):
class L2Norm (line 122) | class L2Norm(ssd_neck.L2Norm):
method __init__ (line 124) | def __init__(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/swin.py
class WindowMSA (line 22) | class WindowMSA(BaseModule):
method __init__ (line 41) | def __init__(self,
method init_weights (line 78) | def init_weights(self):
method forward (line 81) | def forward(self, x, mask=None):
method double_step_seq (line 122) | def double_step_seq(step1, len1, step2, len2):
class ShiftWindowMSA (line 128) | class ShiftWindowMSA(BaseModule):
method __init__ (line 151) | def __init__(self,
method forward (line 180) | def forward(self, query, hw_shape):
method window_reverse (line 256) | def window_reverse(self, windows, H, W):
method window_partition (line 272) | def window_partition(self, x):
class SwinBlock (line 288) | class SwinBlock(BaseModule):
method __init__ (line 313) | def __init__(self,
method forward (line 358) | def forward(self, x, hw_shape):
class SwinBlockSequence (line 381) | class SwinBlockSequence(BaseModule):
method __init__ (line 410) | def __init__(self,
method forward (line 455) | def forward(self, x, hw_shape):
class SwinTransformer (line 467) | class SwinTransformer(BaseModule):
method __init__ (line 524) | def __init__(self,
method train (line 642) | def train(self, mode=True):
method _freeze_stages (line 647) | def _freeze_stages(self):
method init_weights (line 669) | def init_weights(self):
method forward (line 744) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/backbones/trident_resnet.py
class TridentConv (line 14) | class TridentConv(BaseModule):
method __init__ (line 33) | def __init__(self,
method extra_repr (line 61) | def extra_repr(self):
method forward (line 73) | def forward(self, inputs):
class TridentBottleneck (line 93) | class TridentBottleneck(Bottleneck):
method __init__ (line 106) | def __init__(self, trident_dilations, test_branch_idx, concat_output,
method forward (line 128) | def forward(self, x):
function make_trident_res_layer (line 182) | def make_trident_res_layer(block,
class TridentResNet (line 235) | class TridentResNet(ResNet):
method __init__ (line 256) | def __init__(self, depth, num_branch, test_branch_idx, trident_dilations,
FILE: DLTA_AI_app/mmdetection/mmdet/models/builder.py
function build_backbone (line 18) | def build_backbone(cfg):
function build_neck (line 23) | def build_neck(cfg):
function build_roi_extractor (line 28) | def build_roi_extractor(cfg):
function build_shared_head (line 33) | def build_shared_head(cfg):
function build_head (line 38) | def build_head(cfg):
function build_loss (line 43) | def build_loss(cfg):
function build_detector (line 48) | def build_detector(cfg, train_cfg=None, test_cfg=None):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/anchor_free_head.py
class AnchorFreeHead (line 18) | class AnchorFreeHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 46) | def __init__(self,
method _init_layers (line 107) | def _init_layers(self):
method _init_cls_convs (line 113) | def _init_cls_convs(self):
method _init_reg_convs (line 133) | def _init_reg_convs(self):
method _init_predictor (line 153) | def _init_predictor(self):
method _load_from_state_dict (line 159) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
method forward (line 197) | def forward(self, feats):
method forward_single (line 215) | def forward_single(self, x):
method loss (line 240) | def loss(self,
method get_targets (line 268) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
method _get_points_single (line 282) | def _get_points_single(self,
method get_points (line 310) | def get_points(self, featmap_sizes, dtype, device, flatten=False):
method aug_test (line 334) | def aug_test(self, feats, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/anchor_head.py
class AnchorHead (line 17) | class AnchorHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 39) | def __init__(self,
method num_anchors (line 115) | def num_anchors(self):
method anchor_generator (line 122) | def anchor_generator(self):
method _init_layers (line 127) | def _init_layers(self):
method forward_single (line 135) | def forward_single(self, x):
method forward (line 152) | def forward(self, feats):
method get_anchors (line 171) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
method _get_targets_single (line 201) | def _get_targets_single(self,
method get_targets (line 299) | def get_targets(self,
method loss_single (line 402) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
method loss (line 453) | def loss(self,
method aug_test (line 522) | def aug_test(self, feats, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/atss_head.py
class ATSSHead (line 14) | class ATSSHead(AnchorHead):
method __init__ (line 24) | def __init__(self,
method _init_layers (line 65) | def _init_layers(self):
method forward (line 109) | def forward(self, feats):
method forward_single (line 127) | def forward_single(self, x, scale):
method loss_single (line 156) | def loss_single(self, anchors, cls_score, bbox_pred, centerness, labels,
method loss (line 230) | def loss(self,
method centerness_target (line 305) | def centerness_target(self, anchors, gts):
method get_targets (line 322) | def get_targets(self,
method _get_target_single (line 386) | def _get_target_single(self,
method get_num_level_anchors_inside (line 496) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/autoassign_head.py
class CenterPrior (line 21) | class CenterPrior(nn.Module):
method __init__ (line 37) | def __init__(self,
method forward (line 49) | def forward(self, anchor_points_list, gt_bboxes, labels,
class AutoAssignHead (line 128) | class AutoAssignHead(FCOSHead):
method __init__ (line 148) | def __init__(self,
method init_weights (line 167) | def init_weights(self):
method forward_single (line 179) | def forward_single(self, x, scale, stride):
method get_pos_loss_single (line 207) | def get_pos_loss_single(self, cls_score, objectness, reg_loss, gt_labels,
method get_neg_loss_single (line 251) | def get_neg_loss_single(self, cls_score, objectness, gt_labels, ious,
method loss (line 307) | def loss(self,
method get_targets (line 439) | def get_targets(self, points, gt_bboxes_list):
method _get_target_single (line 468) | def _get_target_single(self, gt_bboxes, points):
method _get_points_single (line 506) | def _get_points_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/base_dense_head.py
class BaseDenseHead (line 12) | class BaseDenseHead(BaseModule, metaclass=ABCMeta):
method __init__ (line 15) | def __init__(self, init_cfg=None):
method init_weights (line 18) | def init_weights(self):
method loss (line 27) | def loss(self, **kwargs):
method get_bboxes (line 32) | def get_bboxes(self,
method _get_bboxes_single (line 109) | def _get_bboxes_single(self,
method _bbox_post_process (line 226) | def _bbox_post_process(self,
method forward_train (line 303) | def forward_train(self,
method simple_test (line 343) | def simple_test(self, feats, img_metas, rescale=False):
method onnx_export (line 363) | def onnx_export(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/base_mask_head.py
class BaseMaskHead (line 7) | class BaseMaskHead(BaseModule, metaclass=ABCMeta):
method __init__ (line 10) | def __init__(self, init_cfg):
method loss (line 14) | def loss(self, **kwargs):
method get_results (line 18) | def get_results(self, **kwargs):
method forward_train (line 22) | def forward_train(self,
method simple_test (line 73) | def simple_test(self,
method onnx_export (line 114) | def onnx_export(self, img, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/cascade_rpn_head.py
class AdaptiveConv (line 20) | class AdaptiveConv(BaseModule):
method __init__ (line 42) | def __init__(self,
method forward (line 79) | def forward(self, x, offset):
class StageCascadeRPNHead (line 96) | class StageCascadeRPNHead(RPNHead):
method __init__ (line 112) | def __init__(self,
method _init_layers (line 153) | def _init_layers(self):
method forward_single (line 164) | def forward_single(self, x, offset):
method forward (line 174) | def forward(self, feats, offset_list=None):
method _region_targets_single (line 180) | def _region_targets_single(self,
method region_targets (line 236) | def region_targets(self,
method get_targets (line 286) | def get_targets(self,
method anchor_offset (line 328) | def anchor_offset(self, anchor_list, anchor_strides, featmap_sizes):
method loss_single (line 406) | def loss_single(self, cls_score, bbox_pred, anchors, labels, label_wei...
method loss (line 436) | def loss(self,
method get_bboxes (line 505) | def get_bboxes(self,
method _get_bboxes_single (line 547) | def _get_bboxes_single(self,
method refine_bboxes (line 672) | def refine_bboxes(self, anchor_list, bbox_preds, img_metas):
class CascadeRPNHead (line 690) | class CascadeRPNHead(BaseDenseHead):
method __init__ (line 705) | def __init__(self, num_stages, stages, train_cfg, test_cfg, init_cfg=N...
method loss (line 720) | def loss(self):
method get_bboxes (line 724) | def get_bboxes(self):
method forward_train (line 728) | def forward_train(self,
method simple_test_rpn (line 773) | def simple_test_rpn(self, x, img_metas):
method aug_test_rpn (line 798) | def aug_test_rpn(self, x, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/centernet_head.py
class CenterNetHead (line 18) | class CenterNetHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 38) | def __init__(self,
method _build_head (line 64) | def _build_head(self, in_channel, feat_channel, out_channel):
method init_weights (line 72) | def init_weights(self):
method forward (line 81) | def forward(self, feats):
method forward_single (line 98) | def forward_single(self, feat):
method loss (line 116) | def loss(self,
method get_targets (line 181) | def get_targets(self, gt_bboxes, gt_labels, feat_shape, img_shape):
method get_bboxes (line 252) | def get_bboxes(self,
method _get_bboxes_single (line 297) | def _get_bboxes_single(self,
method decode_heatmap (line 351) | def decode_heatmap(self,
method _bboxes_nms (line 402) | def _bboxes_nms(self, bboxes, labels, cfg):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/centripetal_head.py
class CentripetalHead (line 13) | class CentripetalHead(CornerHead):
method __init__ (line 48) | def __init__(self,
method _init_centripetal_layers (line 73) | def _init_centripetal_layers(self):
method _init_layers (line 133) | def _init_layers(self):
method init_weights (line 141) | def init_weights(self):
method forward_single (line 157) | def forward_single(self, x, lvl_ind):
method loss (line 208) | def loss(self,
method loss_single (line 284) | def loss_single(self, tl_hmp, br_hmp, tl_off, br_off, tl_guiding_shift,
method get_bboxes (line 367) | def get_bboxes(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/corner_head.py
class BiCornerPool (line 21) | class BiCornerPool(BaseModule):
method __init__ (line 34) | def __init__(self,
method forward (line 64) | def forward(self, x):
class CornerHead (line 85) | class CornerHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 118) | def __init__(self,
method _make_layers (line 158) | def _make_layers(self, out_channels, in_channels=256, feat_channels=256):
method _init_corner_kpt_layers (line 165) | def _init_corner_kpt_layers(self):
method _init_corner_emb_layers (line 203) | def _init_corner_emb_layers(self):
method _init_layers (line 221) | def _init_layers(self):
method init_weights (line 230) | def init_weights(self):
method forward (line 248) | def forward(self, feats):
method forward_single (line 280) | def forward_single(self, x, lvl_ind, return_pool=False):
method get_targets (line 325) | def get_targets(self,
method loss (line 514) | def loss(self,
method loss_single (line 576) | def loss_single(self, tl_hmp, br_hmp, tl_emb, br_emb, tl_off, br_off,
method get_bboxes (line 655) | def get_bboxes(self,
method _get_bboxes_single (line 704) | def _get_bboxes_single(self,
method _bboxes_nms (line 777) | def _bboxes_nms(self, bboxes, labels, cfg):
method decode_heatmap (line 795) | def decode_heatmap(self,
method onnx_export (line 1031) | def onnx_export(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ddod_head.py
class DDODHead (line 17) | class DDODHead(AnchorHead):
method __init__ (line 37) | def __init__(self,
method _init_layers (line 63) | def _init_layers(self):
method init_weights (line 110) | def init_weights(self):
method forward (line 121) | def forward(self, feats):
method forward_single (line 142) | def forward_single(self, x, scale):
method loss_cls_single (line 171) | def loss_cls_single(self, cls_score, labels, label_weights,
method loss_reg_single (line 198) | def loss_reg_single(self, anchors, bbox_pred, iou_pred, labels,
method calc_reweight_factor (line 275) | def calc_reweight_factor(self, labels_list):
method loss (line 295) | def loss(self,
method process_predictions_and_anchors (line 413) | def process_predictions_and_anchors(self, anchor_list, valid_flag_list,
method get_cls_targets (line 483) | def get_cls_targets(self,
method get_reg_targets (line 561) | def get_reg_targets(self,
method _get_target_single (line 634) | def _get_target_single(self,
method get_num_level_anchors_inside (line 761) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/deformable_detr_head.py
class DeformableDETRHead (line 17) | class DeformableDETRHead(DETRHead):
method __init__ (line 36) | def __init__(self,
method _init_layers (line 50) | def _init_layers(self):
method init_weights (line 83) | def init_weights(self):
method forward (line 97) | def forward(self, mlvl_feats, img_metas):
method loss (line 184) | def loss(self,
method get_bboxes (line 269) | def get_bboxes(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/dense_test_mixins.py
class BBoxTestMixin (line 14) | class BBoxTestMixin(object):
method simple_test_bboxes (line 17) | def simple_test_bboxes(self, feats, img_metas, rescale=False):
method aug_test_bboxes (line 41) | def aug_test_bboxes(self, feats, img_metas, rescale=False):
method simple_test_rpn (line 116) | def simple_test_rpn(self, x, img_metas):
method aug_test_rpn (line 133) | def aug_test_rpn(self, feats, img_metas):
method async_simple_test_rpn (line 169) | async def async_simple_test_rpn(self, x, img_metas):
method merge_aug_bboxes (line 179) | def merge_aug_bboxes(self, aug_bboxes, aug_scores, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/detr_head.py
class DETRHead (line 18) | class DETRHead(AnchorFreeHead):
method __init__ (line 52) | def __init__(self,
method _init_layers (line 152) | def _init_layers(self):
method init_weights (line 167) | def init_weights(self):
method _load_from_state_dict (line 172) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
method forward (line 202) | def forward(self, feats, img_metas):
method forward_single (line 226) | def forward_single(self, x, img_metas):
method loss (line 268) | def loss(self,
method loss_single (line 334) | def loss_single(self,
method get_targets (line 419) | def get_targets(self,
method _get_target_single (line 476) | def _get_target_single(self,
method forward_train (line 547) | def forward_train(self,
method get_bboxes (line 583) | def get_bboxes(self,
method _get_bboxes_single (line 628) | def _get_bboxes_single(self,
method simple_test_bboxes (line 685) | def simple_test_bboxes(self, feats, img_metas, rescale=False):
method forward_onnx (line 707) | def forward_onnx(self, feats, img_metas):
method forward_single_onnx (line 735) | def forward_single_onnx(self, x, img_metas):
method onnx_export (line 775) | def onnx_export(self, all_cls_scores_list, all_bbox_preds_list, img_me...
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/embedding_rpn_head.py
class EmbeddingRPNHead (line 11) | class EmbeddingRPNHead(BaseModule):
method __init__ (line 26) | def __init__(self,
method _init_layers (line 38) | def _init_layers(self):
method init_weights (line 44) | def init_weights(self):
method _decode_init_proposals (line 54) | def _decode_init_proposals(self, imgs, img_metas):
method forward_dummy (line 95) | def forward_dummy(self, img, img_metas):
method forward_train (line 102) | def forward_train(self, img, img_metas):
method simple_test_rpn (line 106) | def simple_test_rpn(self, img, img_metas):
method simple_test (line 110) | def simple_test(self, img, img_metas):
method aug_test_rpn (line 114) | def aug_test_rpn(self, feats, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fcos_head.py
class FCOSHead (line 17) | class FCOSHead(AnchorFreeHead):
method __init__ (line 60) | def __init__(self,
method _init_layers (line 106) | def _init_layers(self):
method forward (line 112) | def forward(self, feats):
method forward_single (line 133) | def forward_single(self, x, scale, stride):
method loss (line 168) | def loss(self,
method get_targets (line 270) | def get_targets(self, points, gt_bboxes_list, gt_labels_list):
method _get_target_single (line 331) | def _get_target_single(self, gt_bboxes, gt_labels, points, regress_ran...
method centerness_target (line 415) | def centerness_target(self, pos_bbox_targets):
method _get_points_single (line 436) | def _get_points_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fovea_head.py
class FeatureAlign (line 18) | class FeatureAlign(BaseModule):
method __init__ (line 20) | def __init__(self,
method forward (line 43) | def forward(self, x, shape):
class FoveaHead (line 50) | class FoveaHead(AnchorFreeHead):
method __init__ (line 55) | def __init__(self,
method _init_layers (line 81) | def _init_layers(self):
method forward_single (line 121) | def forward_single(self, x):
method loss (line 134) | def loss(self,
method get_targets (line 186) | def get_targets(self, gt_bbox_list, gt_label_list, featmap_sizes, poin...
method _get_target_single (line 208) | def _get_target_single(self,
method _get_bboxes_single (line 272) | def _get_bboxes_single(self,
method _bbox_decode (line 358) | def _bbox_decode(self, priors, bbox_pred, base_len, max_shape):
method _get_points_single (line 374) | def _get_points_single(self, *args, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/free_anchor_retina_head.py
class FreeAnchorRetinaHead (line 13) | class FreeAnchorRetinaHead(RetinaHead):
method __init__ (line 34) | def __init__(self,
method loss (line 54) | def loss(self,
method positive_bag_loss (line 221) | def positive_bag_loss(self, matched_cls_prob, matched_box_prob):
method negative_bag_loss (line 248) | def negative_bag_loss(self, cls_prob, box_prob):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/fsaf_head.py
class FSAFHead (line 15) | class FSAFHead(RetinaHead):
method __init__ (line 43) | def __init__(self, *args, score_threshold=None, init_cfg=None, **kwargs):
method forward_single (line 63) | def forward_single(self, x):
method _get_targets_single (line 80) | def _get_targets_single(self,
method loss (line 188) | def loss(self,
method calculate_pos_recall (line 317) | def calculate_pos_recall(self, cls_scores, labels_list, pos_inds):
method collect_loss_level_single (line 351) | def collect_loss_level_single(self, cls_loss, reg_loss, assigned_gt_inds,
method reweight_loss_single (line 382) | def reweight_loss_single(self, cls_loss, reg_loss, assigned_gt_inds,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ga_retina_head.py
class GARetinaHead (line 11) | class GARetinaHead(GuidedAnchorHead):
method __init__ (line 14) | def __init__(self,
method _init_layers (line 45) | def _init_layers(self):
method forward_single (line 92) | def forward_single(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ga_rpn_head.py
class GARPNHead (line 16) | class GARPNHead(GuidedAnchorHead):
method __init__ (line 19) | def __init__(self,
method _init_layers (line 34) | def _init_layers(self):
method forward_single (line 40) | def forward_single(self, x):
method loss (line 49) | def loss(self,
method _get_bboxes_single (line 72) | def _get_bboxes_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/gfl_head.py
class Integral (line 16) | class Integral(nn.Module):
method __init__ (line 29) | def __init__(self, reg_max=16):
method forward (line 35) | def forward(self, x):
class GFLHead (line 53) | class GFLHead(AnchorHead):
method __init__ (line 88) | def __init__(self,
method _init_layers (line 128) | def _init_layers(self):
method forward (line 161) | def forward(self, feats):
method forward_single (line 179) | def forward_single(self, x, scale):
method anchor_center (line 205) | def anchor_center(self, anchors):
method loss_single (line 218) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
method loss (line 307) | def loss(self,
method _get_bboxes_single (line 380) | def _get_bboxes_single(self,
method get_targets (line 473) | def get_targets(self,
method _get_target_single (line 537) | def _get_target_single(self,
method get_num_level_anchors_inside (line 643) | def get_num_level_anchors_inside(self, num_level_anchors, inside_flags):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/guided_anchor_head.py
class FeatureAdaption (line 16) | class FeatureAdaption(BaseModule):
method __init__ (line 31) | def __init__(self,
method forward (line 54) | def forward(self, x, shape):
class GuidedAnchorHead (line 61) | class GuidedAnchorHead(AnchorHead):
method __init__ (line 97) | def __init__(
method num_anchors (line 212) | def num_anchors(self):
method _init_layers (line 217) | def _init_layers(self):
method forward_single (line 233) | def forward_single(self, x):
method forward (line 246) | def forward(self, feats):
method get_sampled_approxs (line 249) | def get_sampled_approxs(self, featmap_sizes, img_metas, device='cuda'):
method get_anchors (line 299) | def get_anchors(self,
method _get_guided_anchors_single (line 350) | def _get_guided_anchors_single(self,
method ga_loc_targets (line 384) | def ga_loc_targets(self, gt_bboxes_list, featmap_sizes):
method _ga_shape_target_single (line 486) | def _ga_shape_target_single(self,
method ga_shape_targets (line 549) | def ga_shape_targets(self,
method loss_shape_single (line 615) | def loss_shape_single(self, shape_pred, bbox_anchors, bbox_gts,
method loss_loc_single (line 639) | def loss_loc_single(self, loc_pred, loc_target, loc_weight,
method loss (line 650) | def loss(self,
method get_bboxes (line 756) | def get_bboxes(self,
method _get_bboxes_single (line 800) | def _get_bboxes_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/lad_head.py
class LADHead (line 11) | class LADHead(PAAHead):
method get_label_assignment (line 16) | def get_label_assignment(self,
method forward_train (line 121) | def forward_train(self,
method loss (line 160) | def loss(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ld_head.py
class LDHead (line 11) | class LDHead(GFLHead):
method __init__ (line 26) | def __init__(self,
method loss_single (line 38) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
method forward_train (line 142) | def forward_train(self,
method loss (line 185) | def loss(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/mask2former_head.py
class Mask2FormerHead (line 21) | class Mask2FormerHead(MaskFormerHead):
method __init__ (line 58) | def __init__(self,
method init_weights (line 137) | def init_weights(self):
method _get_target_single (line 148) | def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
method loss_single (line 218) | def loss_single(self, cls_scores, mask_preds, gt_labels_list,
method forward_head (line 309) | def forward_head(self, decoder_out, mask_feature, attn_mask_target_size):
method forward (line 351) | def forward(self, feats, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/maskformer_head.py
class MaskFormerHead (line 17) | class MaskFormerHead(AnchorFreeHead):
method __init__ (line 53) | def __init__(self,
method init_weights (line 128) | def init_weights(self):
method preprocess_gt (line 138) | def preprocess_gt(self, gt_labels_list, gt_masks_list, gt_semantic_segs,
method get_targets (line 174) | def get_targets(self, cls_scores_list, mask_preds_list, gt_labels_list,
method _get_target_single (line 218) | def _get_target_single(self, cls_score, mask_pred, gt_labels, gt_masks,
method loss (line 279) | def loss(self, all_cls_scores, all_mask_preds, gt_labels_list,
method loss_single (line 323) | def loss_single(self, cls_scores, mask_preds, gt_labels_list,
method forward (line 415) | def forward(self, feats, img_metas):
method forward_train (line 480) | def forward_train(self,
method simple_test (line 527) | def simple_test(self, feats, img_metas, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/nasfcos_head.py
class NASFCOSHead (line 12) | class NASFCOSHead(FCOSHead):
method __init__ (line 20) | def __init__(self, *args, init_cfg=None, **kwargs):
method _init_layers (line 39) | def _init_layers(self):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/paa_head.py
function levels_to_images (line 18) | def levels_to_images(mlvl_tensor):
class PAAHead (line 46) | class PAAHead(ATSSHead):
method __init__ (line 74) | def __init__(self,
method loss (line 87) | def loss(self,
method get_pos_loss (line 202) | def get_pos_loss(self, anchors, cls_score, bbox_pred, label, label_wei...
method paa_reassign (line 258) | def paa_reassign(self, pos_losses, label, label_weight, bbox_weight,
method gmm_separation_scheme (line 367) | def gmm_separation_scheme(self, gmm_assignment, scores, pos_inds_gmm):
method get_targets (line 402) | def get_targets(
method _get_targets_single (line 494) | def _get_targets_single(self,
method get_bboxes (line 521) | def get_bboxes(self,
method _get_bboxes_single (line 537) | def _get_bboxes_single(self,
method _bbox_post_process (line 620) | def _bbox_post_process(self,
method score_voting (line 693) | def score_voting(self, det_bboxes, det_labels, mlvl_bboxes,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/pisa_retinanet_head.py
class PISARetinaHead (line 12) | class PISARetinaHead(RetinaHead):
method loss (line 23) | def loss(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/pisa_ssd_head.py
class PISASSDHead (line 12) | class PISASSDHead(SSDHead):
method loss (line 14) | def loss(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/reppoints_head.py
class RepPointsHead (line 17) | class RepPointsHead(AnchorFreeHead):
method __init__ (line 36) | def __init__(self,
method _init_layers (line 126) | def _init_layers(self):
method points2bbox (line 170) | def points2bbox(self, pts, y_first=True):
method gen_grid_from_reg (line 221) | def gen_grid_from_reg(self, reg, previous_boxes):
method forward (line 255) | def forward(self, feats):
method forward_single (line 258) | def forward_single(self, x):
method get_points (line 304) | def get_points(self, featmap_sizes, img_metas, device):
method centers_to_bboxes (line 332) | def centers_to_bboxes(self, point_list):
method offset_to_pts (line 350) | def offset_to_pts(self, center_list, pred_list):
method _point_target_single (line 371) | def _point_target_single(self,
method get_targets (line 442) | def get_targets(self,
method loss_single (line 525) | def loss_single(self, cls_score, pts_pred_init, pts_pred_refine, labels,
method loss (line 563) | def loss(self,
method _get_bboxes_single (line 659) | def _get_bboxes_single(self,
method _bbox_decode (line 756) | def _bbox_decode(self, points, bbox_pred, stride, max_shape):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/retina_head.py
class RetinaHead (line 10) | class RetinaHead(AnchorHead):
method __init__ (line 29) | def __init__(self,
method _init_layers (line 61) | def _init_layers(self):
method forward_single (line 94) | def forward_single(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/retina_sepbn_head.py
class RetinaSepBNHead (line 10) | class RetinaSepBNHead(AnchorHead):
method __init__ (line 18) | def __init__(self,
method _init_layers (line 36) | def _init_layers(self):
method init_weights (line 78) | def init_weights(self):
method forward (line 89) | def forward(self, feats):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/rpn_head.py
class RPNHead (line 15) | class RPNHead(AnchorHead):
method __init__ (line 24) | def __init__(self,
method _init_layers (line 33) | def _init_layers(self):
method forward_single (line 62) | def forward_single(self, x):
method loss (line 70) | def loss(self,
method _get_bboxes_single (line 103) | def _get_bboxes_single(self,
method _bbox_post_process (line 189) | def _bbox_post_process(self, mlvl_scores, mlvl_bboxes, mlvl_valid_anch...
method onnx_export (line 237) | def onnx_export(self, x, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/sabl_retina_head.py
class SABLRetinaHead (line 21) | class SABLRetinaHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 54) | def __init__(self,
method num_anchors (line 158) | def num_anchors(self):
method _init_layers (line 163) | def _init_layers(self):
method forward_single (line 194) | def forward_single(self, x):
method forward (line 207) | def forward(self, feats):
method get_anchors (line 210) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
method get_target (line 231) | def get_target(self,
method _get_target_single (line 331) | def _get_target_single(self,
method loss_single (line 448) | def loss_single(self, cls_score, bbox_pred, labels, label_weights,
method loss (line 481) | def loss(self,
method get_bboxes (line 535) | def get_bboxes(self,
method _get_bboxes_single (line 567) | def _get_bboxes_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/solo_head.py
class SOLOHead (line 17) | class SOLOHead(BaseMaskHead):
method __init__ (line 50) | def __init__(
method _init_layers (line 103) | def _init_layers(self):
method resize_feats (line 133) | def resize_feats(self, feats):
method forward (line 155) | def forward(self, feats):
method loss (line 201) | def loss(self,
method _get_targets_single (line 291) | def _get_targets_single(self,
method get_results (line 439) | def get_results(self, mlvl_mask_preds, mlvl_cls_scores, img_metas,
method _get_results_single (line 488) | def _get_results_single(self, cls_scores, mask_preds, img_meta, cfg=No...
class DecoupledSOLOHead (line 590) | class DecoupledSOLOHead(SOLOHead):
method __init__ (line 599) | def __init__(self,
method _init_layers (line 623) | def _init_layers(self):
method forward (line 667) | def forward(self, feats):
method loss (line 728) | def loss(self,
method _get_targets_single (line 831) | def _get_targets_single(self,
method get_results (line 874) | def get_results(self,
method _get_results_single (line 939) | def _get_results_single(self, cls_scores, mask_preds_x, mask_preds_y,
class DecoupledSOLOLightHead (line 1063) | class DecoupledSOLOLightHead(DecoupledSOLOHead):
method __init__ (line 1073) | def __init__(self,
method _init_layers (line 1100) | def _init_layers(self):
method forward (line 1143) | def forward(self, feats):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/solov2_head.py
class MaskFeatModule (line 19) | class MaskFeatModule(BaseModule):
method __init__ (line 42) | def __init__(self,
method _init_layers (line 66) | def _init_layers(self):
method forward (line 134) | def forward(self, feats):
class SOLOV2Head (line 154) | class SOLOV2Head(SOLOHead):
method __init__ (line 170) | def __init__(self,
method _init_layers (line 211) | def _init_layers(self):
method forward (line 254) | def forward(self, feats):
method _get_targets_single (line 294) | def _get_targets_single(self,
method loss (line 462) | def loss(self,
method get_results (line 584) | def get_results(self, mlvl_kernel_preds, mlvl_cls_scores, mask_feats,
method _get_results_single (line 645) | def _get_results_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/ssd_head.py
class SSDHead (line 19) | class SSDHead(AnchorHead):
method __init__ (line 50) | def __init__(self,
method num_anchors (line 116) | def num_anchors(self):
method _init_layers (line 125) | def _init_layers(self):
method forward (line 196) | def forward(self, feats):
method loss_single (line 220) | def loss_single(self, cls_score, bbox_pred, anchor, labels, label_weig...
method loss (line 279) | def loss(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/tood_head.py
class TaskDecomposition (line 17) | class TaskDecomposition(nn.Module):
method __init__ (line 28) | def __init__(self,
method init_weights (line 58) | def init_weights(self):
method forward (line 64) | def forward(self, feat, avg_feat=None):
class TOODHead (line 90) | class TOODHead(ATSSHead):
method __init__ (line 113) | def __init__(self,
method _init_layers (line 142) | def _init_layers(self):
method init_weights (line 191) | def init_weights(self):
method forward (line 210) | def forward(self, feats):
method deform_sampling (line 284) | def deform_sampling(self, feat, offset):
method anchor_center (line 297) | def anchor_center(self, anchors):
method loss_single (line 310) | def loss_single(self, anchors, cls_score, bbox_pred, labels, label_wei...
method loss (line 383) | def loss(self,
method _get_bboxes_single (line 462) | def _get_bboxes_single(self,
method get_targets (line 549) | def get_targets(self,
method _get_target_single (line 664) | def _get_target_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/vfnet_head.py
class VFNetHead (line 22) | class VFNetHead(ATSSHead, FCOSHead):
method __init__ (line 68) | def __init__(self,
method num_anchors (line 176) | def num_anchors(self):
method anchor_generator (line 186) | def anchor_generator(self):
method _init_layers (line 191) | def _init_layers(self):
method forward (line 226) | def forward(self, feats):
method forward_single (line 248) | def forward_single(self, x, scale, scale_refine, stride, reg_denom):
method star_dcn_offset (line 309) | def star_dcn_offset(self, bbox_pred, gradient_mul, stride):
method loss (line 351) | def loss(self,
method get_targets (line 500) | def get_targets(self, cls_scores, mlvl_points, gt_bboxes, gt_labels,
method _get_target_single (line 535) | def _get_target_single(self, *args, **kwargs):
method get_fcos_targets (line 542) | def get_fcos_targets(self, points, gt_bboxes_list, gt_labels_list):
method get_anchors (line 568) | def get_anchors(self, featmap_sizes, img_metas, device='cuda'):
method get_atss_targets (line 598) | def get_atss_targets(self,
method transform_bbox_targets (line 677) | def transform_bbox_targets(self, decoded_bboxes, mlvl_points, num_imgs):
method _load_from_state_dict (line 703) | def _load_from_state_dict(self, state_dict, prefix, local_metadata, st...
method _get_points_single (line 709) | def _get_points_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolact_head.py
class YOLACTHead (line 16) | class YOLACTHead(AnchorHead):
method __init__ (line 44) | def __init__(self,
method _init_layers (line 89) | def _init_layers(self):
method forward_single (line 117) | def forward_single(self, x):
method loss (line 140) | def loss(self,
method loss_single_OHEM (line 261) | def loss_single_OHEM(self, cls_score, bbox_pred, anchors, labels,
method get_bboxes (line 297) | def get_bboxes(self,
method _get_bboxes_single (line 358) | def _get_bboxes_single(self,
class YOLACTSegmHead (line 457) | class YOLACTSegmHead(BaseModule):
method __init__ (line 472) | def __init__(self,
method _init_layers (line 490) | def _init_layers(self):
method forward (line 495) | def forward(self, x):
method loss (line 509) | def loss(self, segm_pred, gt_masks, gt_labels):
method get_targets (line 542) | def get_targets(self, segm_pred, gt_masks, gt_labels):
method simple_test (line 572) | def simple_test(self, feats, img_metas, rescale=False):
class YOLACTProtonet (line 580) | class YOLACTProtonet(BaseModule):
method __init__ (line 599) | def __init__(self,
method _init_layers (line 625) | def _init_layers(self):
method forward_dummy (line 662) | def forward_dummy(self, x):
method forward (line 666) | def forward(self, x, coeff_pred, bboxes, img_meta, sampling_results=No...
method loss (line 743) | def loss(self, mask_pred, gt_masks, gt_bboxes, img_meta, sampling_resu...
method get_targets (line 816) | def get_targets(self, mask_pred, gt_masks, pos_assigned_gt_inds):
method get_seg_masks (line 841) | def get_seg_masks(self, mask_pred, label_pred, img_meta, rescale):
method crop (line 876) | def crop(self, masks, boxes, padding=1):
method sanitize_coordinates (line 910) | def sanitize_coordinates(self, x1, x2, img_size, padding=0, cast=True):
method simple_test (line 941) | def simple_test(self,
class InterpolateModule (line 1004) | class InterpolateModule(BaseModule):
method __init__ (line 1010) | def __init__(self, *args, init_cfg=None, **kwargs):
method forward (line 1016) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolo_head.py
class YOLOV3Head (line 23) | class YOLOV3Head(BaseDenseHead, BBoxTestMixin):
method __init__ (line 51) | def __init__(self,
method anchor_generator (line 125) | def anchor_generator(self):
method num_anchors (line 132) | def num_anchors(self):
method num_levels (line 142) | def num_levels(self):
method num_attrib (line 146) | def num_attrib(self):
method _init_layers (line 152) | def _init_layers(self):
method init_weights (line 170) | def init_weights(self):
method forward (line 186) | def forward(self, feats):
method get_bboxes (line 209) | def get_bboxes(self,
method loss (line 301) | def loss(self,
method loss_single (line 352) | def loss_single(self, pred_map, target_map, neg_map):
method get_targets (line 397) | def get_targets(self, anchor_list, responsible_flag_list, gt_bboxes_list,
method _get_targets_single (line 433) | def _get_targets_single(self, anchors, responsible_flags, gt_bboxes,
method aug_test (line 494) | def aug_test(self, feats, img_metas, rescale=False):
method onnx_export (line 513) | def onnx_export(self, pred_maps, img_metas, with_nms=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolof_head.py
function levels_to_images (line 15) | def levels_to_images(mlvl_tensor):
class YOLOFHead (line 43) | class YOLOFHead(AnchorHead):
method __init__ (line 56) | def __init__(self,
method _init_layers (line 68) | def _init_layers(self):
method init_weights (line 108) | def init_weights(self):
method forward_single (line 119) | def forward_single(self, feature):
method loss (line 137) | def loss(self,
method get_targets (line 224) | def get_targets(self,
method _get_targets_single (line 311) | def _get_targets_single(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/dense_heads/yolox_head.py
class YOLOXHead (line 22) | class YOLOXHead(BaseDenseHead, BBoxTestMixin):
method __init__ (line 53) | def __init__(self,
method _init_layers (line 131) | def _init_layers(self):
method _build_stacked_convs (line 145) | def _build_stacked_convs(self):
method _build_predictor (line 169) | def _build_predictor(self):
method init_weights (line 176) | def init_weights(self):
method forward_single (line 185) | def forward_single(self, x, cls_convs, reg_convs, conv_cls, conv_reg,
method forward (line 198) | def forward(self, feats):
method get_bboxes (line 217) | def get_bboxes(self,
method _bbox_decode (line 301) | def _bbox_decode(self, priors, bbox_preds):
method _bboxes_nms (line 313) | def _bboxes_nms(self, cls_scores, bboxes, score_factor, cfg):
method loss (line 328) | def loss(self,
method _get_target_single (line 426) | def _get_target_single(self, cls_preds, objectness, priors, decoded_bb...
method _get_l1_target (line 488) | def _get_l1_target(self, l1_target, gt_bboxes, priors, eps=1e-8):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/atss.py
class ATSS (line 7) | class ATSS(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/autoassign.py
class AutoAssign (line 7) | class AutoAssign(SingleStageDetector):
method __init__ (line 11) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/base.py
class BaseDetector (line 14) | class BaseDetector(BaseModule, metaclass=ABCMeta):
method __init__ (line 17) | def __init__(self, init_cfg=None):
method with_neck (line 22) | def with_neck(self):
method with_shared_head (line 29) | def with_shared_head(self):
method with_bbox (line 34) | def with_bbox(self):
method with_mask (line 40) | def with_mask(self):
method extract_feat (line 46) | def extract_feat(self, imgs):
method extract_feats (line 50) | def extract_feats(self, imgs):
method forward_train (line 63) | def forward_train(self, imgs, img_metas, **kwargs):
method async_simple_test (line 82) | async def async_simple_test(self, img, img_metas, **kwargs):
method simple_test (line 86) | def simple_test(self, img, img_metas, **kwargs):
method aug_test (line 90) | def aug_test(self, imgs, img_metas, **kwargs):
method aforward_test (line 94) | async def aforward_test(self, *, img, img_metas, **kwargs):
method forward_test (line 112) | def forward_test(self, imgs, img_metas, **kwargs):
method forward (line 157) | def forward(self, img, img_metas, return_loss=True, **kwargs):
method _parse_losses (line 176) | def _parse_losses(self, losses):
method train_step (line 221) | def train_step(self, data, optimizer):
method val_step (line 256) | def val_step(self, data, optimizer=None):
method show_result (line 271) | def show_result(self,
method onnx_export (line 358) | def onnx_export(self, img, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/cascade_rcnn.py
class CascadeRCNN (line 7) | class CascadeRCNN(TwoStageDetector):
method __init__ (line 11) | def __init__(self,
method show_result (line 30) | def show_result(self, data, result, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/centernet.py
class CenterNet (line 11) | class CenterNet(SingleStageDetector):
method __init__ (line 17) | def __init__(self,
method merge_aug_results (line 28) | def merge_aug_results(self, aug_results, with_nms):
method aug_test (line 54) | def aug_test(self, imgs, img_metas, rescale=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/cornernet.py
class CornerNet (line 10) | class CornerNet(SingleStageDetector):
method __init__ (line 17) | def __init__(self,
method merge_aug_results (line 28) | def merge_aug_results(self, aug_results, img_metas):
method aug_test (line 62) | def aug_test(self, imgs, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/ddod.py
class DDOD (line 7) | class DDOD(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/deformable_detr.py
class DeformableDETR (line 7) | class DeformableDETR(DETR):
method __init__ (line 9) | def __init__(self, *args, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/detr.py
class DETR (line 11) | class DETR(SingleStageDetector):
method __init__ (line 15) | def __init__(self,
method forward_dummy (line 27) | def forward_dummy(self, img):
method onnx_export (line 50) | def onnx_export(self, img, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fast_rcnn.py
class FastRCNN (line 7) | class FastRCNN(TwoStageDetector):
method __init__ (line 10) | def __init__(self,
method forward_test (line 27) | def forward_test(self, imgs, img_metas, proposals, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/faster_rcnn.py
class FasterRCNN (line 7) | class FasterRCNN(TwoStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fcos.py
class FCOS (line 7) | class FCOS(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fovea.py
class FOVEA (line 7) | class FOVEA(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/fsaf.py
class FSAF (line 7) | class FSAF(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/gfl.py
class GFL (line 7) | class GFL(SingleStageDetector):
method __init__ (line 9) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/grid_rcnn.py
class GridRCNN (line 7) | class GridRCNN(TwoStageDetector):
method __init__ (line 15) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/htc.py
class HybridTaskCascade (line 7) | class HybridTaskCascade(CascadeRCNN):
method __init__ (line 10) | def __init__(self, **kwargs):
method with_semantic (line 14) | def with_semantic(self):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/kd_one_stage.py
class KnowledgeDistillationSingleStageDetector (line 14) | class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
method __init__ (line 25) | def __init__(self,
method forward_train (line 46) | def forward_train(self,
method cuda (line 78) | def cuda(self, device=None):
method train (line 84) | def train(self, mode=True):
method __setattr__ (line 92) | def __setattr__(self, name, value):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/lad.py
class LAD (line 11) | class LAD(KnowledgeDistillationSingleStageDetector):
method __init__ (line 14) | def __init__(self,
method with_teacher_neck (line 42) | def with_teacher_neck(self):
method extract_teacher_feat (line 47) | def extract_teacher_feat(self, img):
method forward_train (line 54) | def forward_train(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask2former.py
class Mask2Former (line 7) | class Mask2Former(MaskFormer):
method __init__ (line 12) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask_rcnn.py
class MaskRCNN (line 7) | class MaskRCNN(TwoStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/mask_scoring_rcnn.py
class MaskScoringRCNN (line 7) | class MaskScoringRCNN(TwoStageDetector):
method __init__ (line 13) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/maskformer.py
class MaskFormer (line 14) | class MaskFormer(SingleStageDetector):
method __init__ (line 19) | def __init__(self,
method forward_dummy (line 52) | def forward_dummy(self, img, img_metas):
method forward_train (line 70) | def forward_train(self,
method simple_test (line 113) | def simple_test(self, imgs, img_metas, **kwargs):
method aug_test (line 181) | def aug_test(self, imgs, img_metas, **kwargs):
method onnx_export (line 184) | def onnx_export(self, img, img_metas):
method _show_pan_result (line 187) | def _show_pan_result(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/nasfcos.py
class NASFCOS (line 7) | class NASFCOS(SingleStageDetector):
method __init__ (line 13) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/paa.py
class PAA (line 7) | class PAA(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/panoptic_fpn.py
class PanopticFPN (line 7) | class PanopticFPN(TwoStagePanopticSegmentor):
method __init__ (line 11) | def __init__(
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/panoptic_two_stage_segmentor.py
class TwoStagePanopticSegmentor (line 14) | class TwoStagePanopticSegmentor(TwoStageDetector):
method __init__ (line 21) | def __init__(
method with_semantic_head (line 52) | def with_semantic_head(self):
method with_panoptic_fusion_head (line 57) | def with_panoptic_fusion_head(self):
method forward_dummy (line 61) | def forward_dummy(self, img):
method forward_train (line 69) | def forward_train(self,
method simple_test_mask (line 108) | def simple_test_mask(self,
method simple_test (line 169) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
method show_result (line 208) | def show_result(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/point_rend.py
class PointRend (line 7) | class PointRend(TwoStageDetector):
method __init__ (line 15) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/queryinst.py
class QueryInst (line 7) | class QueryInst(SparseRCNN):
method __init__ (line 11) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/reppoints_detector.py
class RepPointsDetector (line 7) | class RepPointsDetector(SingleStageDetector):
method __init__ (line 14) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/retinanet.py
class RetinaNet (line 7) | class RetinaNet(SingleStageDetector):
method __init__ (line 10) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/rpn.py
class RPN (line 15) | class RPN(BaseDetector):
method __init__ (line 18) | def __init__(self,
method extract_feat (line 40) | def extract_feat(self, img):
method forward_dummy (line 55) | def forward_dummy(self, img):
method forward_train (line 61) | def forward_train(self,
method simple_test (line 92) | def simple_test(self, img, img_metas, rescale=False):
method aug_test (line 118) | def aug_test(self, imgs, img_metas, rescale=False):
method show_result (line 143) | def show_result(self, data, result, top_k=20, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/scnet.py
class SCNet (line 7) | class SCNet(CascadeRCNN):
method __init__ (line 10) | def __init__(self, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/single_stage.py
class SingleStageDetector (line 12) | class SingleStageDetector(BaseDetector):
method __init__ (line 19) | def __init__(self,
method extract_feat (line 41) | def extract_feat(self, img):
method forward_dummy (line 48) | def forward_dummy(self, img):
method forward_train (line 57) | def forward_train(self,
method simple_test (line 87) | def simple_test(self, img, img_metas, rescale=False):
method aug_test (line 110) | def aug_test(self, imgs, img_metas, rescale=False):
method onnx_export (line 141) | def onnx_export(self, img, img_metas, with_nms=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/single_stage_instance_seg.py
class SingleStageInstanceSegmentor (line 17) | class SingleStageInstanceSegmentor(BaseDetector):
method __init__ (line 20) | def __init__(self,
method extract_feat (line 56) | def extract_feat(self, img):
method forward_dummy (line 63) | def forward_dummy(self, img):
method forward_train (line 71) | def forward_train(self,
method simple_test (line 142) | def simple_test(self, img, img_metas, rescale=False):
method format_results (line 184) | def format_results(self, results):
method aug_test (line 252) | def aug_test(self, imgs, img_metas, rescale=False):
method show_result (line 255) | def show_result(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/solo.py
class SOLO (line 7) | class SOLO(SingleStageInstanceSegmentor):
method __init__ (line 13) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/solov2.py
class SOLOv2 (line 7) | class SOLOv2(SingleStageInstanceSegmentor):
method __init__ (line 13) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/sparse_rcnn.py
class SparseRCNN (line 7) | class SparseRCNN(TwoStageDetector):
method __init__ (line 11) | def __init__(self, *args, **kwargs):
method forward_train (line 16) | def forward_train(self,
method simple_test (line 67) | def simple_test(self, img, img_metas, rescale=False):
method forward_dummy (line 93) | def forward_dummy(self, img):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/tood.py
class TOOD (line 7) | class TOOD(SingleStageDetector):
method __init__ (line 11) | def __init__(self,
method set_epoch (line 22) | def set_epoch(self, epoch):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/trident_faster_rcnn.py
class TridentFasterRCNN (line 7) | class TridentFasterRCNN(FasterRCNN):
method __init__ (line 10) | def __init__(self,
method simple_test (line 34) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
method aug_test (line 49) | def aug_test(self, imgs, img_metas, rescale=False):
method forward_train (line 62) | def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/two_stage.py
class TwoStageDetector (line 11) | class TwoStageDetector(BaseDetector):
method __init__ (line 18) | def __init__(self,
method with_rpn (line 56) | def with_rpn(self):
method with_roi_head (line 61) | def with_roi_head(self):
method extract_feat (line 65) | def extract_feat(self, img):
method forward_dummy (line 72) | def forward_dummy(self, img):
method forward_train (line 90) | def forward_train(self,
method async_simple_test (line 155) | async def async_simple_test(self,
method simple_test (line 173) | def simple_test(self, img, img_metas, proposals=None, rescale=False):
method aug_test (line 186) | def aug_test(self, imgs, img_metas, rescale=False):
method onnx_export (line 197) | def onnx_export(self, img, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/vfnet.py
class VFNet (line 7) | class VFNet(SingleStageDetector):
method __init__ (line 11) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolact.py
class YOLACT (line 10) | class YOLACT(SingleStageDetector):
method __init__ (line 13) | def __init__(self,
method forward_dummy (line 28) | def forward_dummy(self, img):
method forward_train (line 38) | def forward_train(self,
method simple_test (line 97) | def simple_test(self, img, img_metas, rescale=False):
method aug_test (line 117) | def aug_test(self, imgs, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolo.py
class YOLOV3 (line 10) | class YOLOV3(SingleStageDetector):
method __init__ (line 12) | def __init__(self,
method onnx_export (line 23) | def onnx_export(self, img, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolof.py
class YOLOF (line 7) | class YOLOF(SingleStageDetector):
method __init__ (line 11) | def __init__(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/detectors/yolox.py
class YOLOX (line 15) | class YOLOX(SingleStageDetector):
method __init__ (line 46) | def __init__(self,
method forward_train (line 69) | def forward_train(self,
method _preprocess (line 105) | def _preprocess(self, img, gt_bboxes):
method _random_resize (line 119) | def _random_resize(self, device):
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/accuracy.py
function accuracy (line 7) | def accuracy(pred, target, topk=1, thresh=None):
class Accuracy (line 54) | class Accuracy(nn.Module):
method __init__ (line 56) | def __init__(self, topk=(1, ), thresh=None):
method forward (line 69) | def forward(self, pred, target):
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/ae_loss.py
function ae_loss_per_image (line 11) | def ae_loss_per_image(tl_preds, br_preds, match):
class AssociativeEmbeddingLoss (line 75) | class AssociativeEmbeddingLoss(nn.Module):
method __init__ (line 88) | def __init__(self, pull_weight=0.25, push_weight=0.25):
method forward (line 93) | def forward(self, pred, target, match):
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/balanced_l1_loss.py
function balanced_l1_loss (line 13) | def balanced_l1_loss(pred,
class BalancedL1Loss (line 57) | class BalancedL1Loss(nn.Module):
method __init__ (line 74) | def __init__(self,
method forward (line 87) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/cross_entropy_loss.py
function cross_entropy (line 12) | def cross_entropy(pred,
function _expand_onehot_labels (line 64) | def _expand_onehot_labels(labels, label_weights, label_channels, ignore_...
function binary_cross_entropy (line 85) | def binary_cross_entropy(pred,
function mask_cross_entropy (line 148) | def mask_cross_entropy(pred,
class CrossEntropyLoss (line 201) | class CrossEntropyLoss(nn.Module):
method __init__ (line 203) | def __init__(self,
method extra_repr (line 252) | def extra_repr(self):
method forward (line 257) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/dice_loss.py
function dice_loss (line 9) | def dice_loss(pred,
class DiceLoss (line 66) | class DiceLoss(nn.Module):
method __init__ (line 68) | def __init__(self,
method forward (line 103) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/focal_loss.py
function py_sigmoid_focal_loss (line 12) | def py_sigmoid_focal_loss(pred,
function py_focal_loss_with_prob (line 60) | def py_focal_loss_with_prob(pred,
function sigmoid_focal_loss (line 113) | def sigmoid_focal_loss(pred,
class FocalLoss (line 160) | class FocalLoss(nn.Module):
method __init__ (line 162) | def __init__(self,
method forward (line 196) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/gaussian_focal_loss.py
function gaussian_focal_loss (line 11) | def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
class GaussianFocalLoss (line 33) | class GaussianFocalLoss(nn.Module):
method __init__ (line 50) | def __init__(self,
method forward (line 61) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/gfocal_loss.py
function quality_focal_loss (line 12) | def quality_focal_loss(pred, target, beta=2.0):
function quality_focal_loss_with_prob (line 56) | def quality_focal_loss_with_prob(pred, target, beta=2.0):
function distribution_focal_loss (line 103) | def distribution_focal_loss(pred, label):
class QualityFocalLoss (line 128) | class QualityFocalLoss(nn.Module):
method __init__ (line 146) | def __init__(self,
method forward (line 160) | def forward(self,
class DistributionFocalLoss (line 203) | class DistributionFocalLoss(nn.Module):
method __init__ (line 213) | def __init__(self, reduction='mean', loss_weight=1.0):
method forward (line 218) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/ghm_loss.py
function _expand_onehot_labels (line 10) | def _expand_onehot_labels(labels, label_weights, label_channels):
class GHMC (line 23) | class GHMC(nn.Module):
method __init__ (line 39) | def __init__(self,
method forward (line 60) | def forward(self,
class GHMR (line 122) | class GHMR(nn.Module):
method __init__ (line 138) | def __init__(self,
method forward (line 158) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/iou_loss.py
function iou_loss (line 16) | def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
function bounded_iou_loss (line 55) | def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
function giou_loss (line 102) | def giou_loss(pred, target, eps=1e-7):
function diou_loss (line 122) | def diou_loss(pred, target, eps=1e-7):
function ciou_loss (line 177) | def ciou_loss(pred, target, eps=1e-7):
class IoULoss (line 241) | class IoULoss(nn.Module):
method __init__ (line 256) | def __init__(self,
method forward (line 275) | def forward(self,
class BoundedIoULoss (line 322) | class BoundedIoULoss(nn.Module):
method __init__ (line 324) | def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1...
method forward (line 331) | def forward(self,
class GIoULoss (line 358) | class GIoULoss(nn.Module):
method __init__ (line 360) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
method forward (line 366) | def forward(self,
class DIoULoss (line 398) | class DIoULoss(nn.Module):
method __init__ (line 400) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
method forward (line 406) | def forward(self,
class CIoULoss (line 438) | class CIoULoss(nn.Module):
method __init__ (line 440) | def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
method forward (line 446) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/kd_loss.py
function knowledge_distillation_kl_div_loss (line 12) | def knowledge_distillation_kl_div_loss(pred,
class KnowledgeDistillationKLDivLoss (line 40) | class KnowledgeDistillationKLDivLoss(nn.Module):
method __init__ (line 49) | def __init__(self, reduction='mean', loss_weight=1.0, T=10):
method forward (line 56) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/mse_loss.py
function mse_loss (line 10) | def mse_loss(pred, target):
class MSELoss (line 16) | class MSELoss(nn.Module):
method __init__ (line 25) | def __init__(self, reduction='mean', loss_weight=1.0):
method forward (line 30) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/pisa_loss.py
function isr_p (line 9) | def isr_p(cls_score,
function carl_loss (line 123) | def carl_loss(cls_score,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/seesaw_loss.py
function seesaw_ce_loss (line 12) | def seesaw_ce_loss(cls_score,
class SeesawLoss (line 80) | class SeesawLoss(nn.Module):
method __init__ (line 103) | def __init__(self,
method _split_cls_score (line 138) | def _split_cls_score(self, cls_score):
method get_cls_channels (line 145) | def get_cls_channels(self, num_classes):
method get_activation (line 157) | def get_activation(self, cls_score):
method get_accuracy (line 177) | def get_accuracy(self, cls_score, labels):
method forward (line 199) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/smooth_l1_loss.py
function smooth_l1_loss (line 12) | def smooth_l1_loss(pred, target, beta=1.0):
function l1_loss (line 37) | def l1_loss(pred, target):
class SmoothL1Loss (line 56) | class SmoothL1Loss(nn.Module):
method __init__ (line 67) | def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
method forward (line 73) | def forward(self,
class L1Loss (line 108) | class L1Loss(nn.Module):
method __init__ (line 117) | def __init__(self, reduction='mean', loss_weight=1.0):
method forward (line 122) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/utils.py
function reduce_loss (line 9) | def reduce_loss(loss, reduction):
function weight_reduce_loss (line 30) | def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=N...
function weighted_loss (line 62) | def weighted_loss(loss_func):
FILE: DLTA_AI_app/mmdetection/mmdet/models/losses/varifocal_loss.py
function varifocal_loss (line 11) | def varifocal_loss(pred,
class VarifocalLoss (line 60) | class VarifocalLoss(nn.Module):
method __init__ (line 62) | def __init__(self,
method forward (line 97) | def forward(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/bfp.py
class BFP (line 11) | class BFP(BaseModule):
method __init__ (line 33) | def __init__(self,
method forward (line 70) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/channel_mapper.py
class ChannelMapper (line 10) | class ChannelMapper(BaseModule):
method __init__ (line 46) | def __init__(self,
method forward (line 90) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/ct_resnet_neck.py
class CTResNetNeck (line 12) | class CTResNetNeck(BaseModule):
method __init__ (line 24) | def __init__(self,
method _make_deconv_layer (line 38) | def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):
method init_weights (line 64) | def init_weights(self):
method forward (line 91) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/dilated_encoder.py
class Bottleneck (line 10) | class Bottleneck(nn.Module):
method __init__ (line 24) | def __init__(self,
method forward (line 42) | def forward(self, x):
class DilatedEncoder (line 52) | class DilatedEncoder(nn.Module):
method __init__ (line 68) | def __init__(self, in_channels, out_channels, block_mid_channels,
method _init_layers (line 78) | def _init_layers(self):
method init_weights (line 95) | def init_weights(self):
method forward (line 106) | def forward(self, feature):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/dyhead.py
class DyDCNv2 (line 17) | class DyDCNv2(nn.Module):
method __init__ (line 32) | def __init__(self,
method forward (line 45) | def forward(self, x, offset, mask):
class DyHeadBlock (line 53) | class DyHeadBlock(nn.Module):
method __init__ (line 69) | def __init__(self,
method _init_weights (line 91) | def _init_weights(self):
method forward (line 98) | def forward(self, x):
class DyHead (line 132) | class DyHead(BaseModule):
method __init__ (line 148) | def __init__(self,
method forward (line 172) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpg.py
class Transition (line 10) | class Transition(BaseModule):
method __init__ (line 18) | def __init__(self, in_channels, out_channels, init_cfg=None):
method forward (line 23) | def forward(x):
class UpInterpolationConv (line 27) | class UpInterpolationConv(Transition):
method __init__ (line 43) | def __init__(self,
method forward (line 63) | def forward(self, x):
class LastConv (line 73) | class LastConv(Transition):
method __init__ (line 83) | def __init__(self,
method forward (line 99) | def forward(self, inputs):
class FPG (line 105) | class FPG(BaseModule):
method __init__ (line 150) | def __init__(self,
method build_trans (line 317) | def build_trans(self, cfg, in_channels, out_channels, **extra_args):
method fuse (line 323) | def fuse(self, fuse_dict):
method forward (line 333) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpn.py
class FPN (line 11) | class FPN(BaseModule):
method __init__ (line 62) | def __init__(self,
method forward (line 152) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/fpn_carafe.py
class FPN_CARAFE (line 11) | class FPN_CARAFE(BaseModule):
method __init__ (line 37) | def __init__(self,
method init_weights (line 209) | def init_weights(self):
method slice_as (line 219) | def slice_as(self, src, dst):
method tensor_add (line 239) | def tensor_add(self, a, b):
method forward (line 247) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/hrfpn.py
class HRFPN (line 13) | class HRFPN(BaseModule):
method __init__ (line 33) | def __init__(self,
method forward (line 77) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/nas_fpn.py
class NASFPN (line 11) | class NASFPN(BaseModule):
method __init__ (line 33) | def __init__(self,
method forward (line 127) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/nasfcos_fpn.py
class NASFCOS_FPN (line 12) | class NASFCOS_FPN(BaseModule):
method __init__ (line 35) | def __init__(self,
method forward (line 123) | def forward(self, inputs):
method init_weights (line 157) | def init_weights(self):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/pafpn.py
class PAFPN (line 12) | class PAFPN(FPN):
method __init__ (line 46) | def __init__(self,
method forward (line 100) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/rfp.py
class ASPP (line 12) | class ASPP(BaseModule):
method __init__ (line 26) | def __init__(self,
method forward (line 48) | def forward(self, x):
class RFP (line 60) | class RFP(FPN):
method __init__ (line 78) | def __init__(self,
method init_weights (line 105) | def init_weights(self):
method forward (line 117) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/ssd_neck.py
class SSDNeck (line 11) | class SSDNeck(BaseModule):
method __init__ (line 33) | def __init__(self,
method forward (line 93) | def forward(self, inputs):
class L2Norm (line 106) | class L2Norm(nn.Module):
method __init__ (line 108) | def __init__(self, n_dims, scale=20., eps=1e-10):
method forward (line 123) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/yolo_neck.py
class DetectionBlock (line 12) | class DetectionBlock(BaseModule):
method __init__ (line 35) | def __init__(self,
method forward (line 55) | def forward(self, x):
class YOLOV3Neck (line 65) | class YOLOV3Neck(BaseModule):
method __init__ (line 92) | def __init__(self,
method forward (line 120) | def forward(self, feats):
FILE: DLTA_AI_app/mmdetection/mmdet/models/necks/yolox_pafpn.py
class YOLOXPAFPN (line 14) | class YOLOXPAFPN(BaseModule):
method __init__ (line 35) | def __init__(self,
method forward (line 117) | def forward(self, inputs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/dropblock.py
class DropBlock (line 11) | class DropBlock(nn.Module):
method __init__ (line 25) | def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
method forward (line 35) | def forward(self, x):
method _compute_gamma (line 62) | def _compute_gamma(self, feat_size):
method extra_repr (line 83) | def extra_repr(self):
FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/msdeformattn_pixel_decoder.py
class MSDeformAttnPixelDecoder (line 16) | class MSDeformAttnPixelDecoder(BaseModule):
method __init__ (line 40) | def __init__(self,
method init_weights (line 135) | def init_weights(self):
method forward (line 161) | def forward(self, feats):
FILE: DLTA_AI_app/mmdetection/mmdet/models/plugins/pixel_decoder.py
class PixelDecoder (line 12) | class PixelDecoder(BaseModule):
method __init__ (line 34) | def __init__(self,
method init_weights (line 79) | def init_weights(self):
method forward (line 88) | def forward(self, feats, img_metas):
class TransformerEncoderPixelDecoder (line 116) | class TransformerEncoderPixelDecoder(PixelDecoder):
method __init__ (line 138) | def __init__(self,
method init_weights (line 178) | def init_weights(self):
method forward (line 192) | def forward(self, feats, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/base_roi_head.py
class BaseRoIHead (line 9) | class BaseRoIHead(BaseModule, metaclass=ABCMeta):
method __init__ (line 12) | def __init__(self,
method with_bbox (line 38) | def with_bbox(self):
method with_mask (line 43) | def with_mask(self):
method with_shared_head (line 48) | def with_shared_head(self):
method init_bbox_head (line 53) | def init_bbox_head(self):
method init_mask_head (line 58) | def init_mask_head(self):
method init_assigner_sampler (line 63) | def init_assigner_sampler(self):
method forward_train (line 68) | def forward_train(self,
method async_simple_test (line 79) | async def async_simple_test(self,
method simple_test (line 89) | def simple_test(self,
method aug_test (line 98) | def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/bbox_head.py
class BBoxHead (line 15) | class BBoxHead(BaseModule):
method __init__ (line 19) | def __init__(self,
method custom_cls_channels (line 97) | def custom_cls_channels(self):
method custom_activation (line 101) | def custom_activation(self):
method custom_accuracy (line 105) | def custom_accuracy(self):
method forward (line 109) | def forward(self, x):
method _get_target_single (line 122) | def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
method get_targets (line 189) | def get_targets(self,
method loss (line 257) | def loss(self,
method get_bboxes (line 316) | def get_bboxes(self,
method refine_bboxes (line 381) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
method regress_by_class (line 460) | def regress_by_class(self, rois, label, bbox_pred, img_meta):
method onnx_export (line 499) | def onnx_export(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py
class ConvFCBBoxHead (line 11) | class ConvFCBBoxHead(BBoxHead):
method __init__ (line 22) | def __init__(self,
method _add_conv_fc_branch (line 118) | def _add_conv_fc_branch(self,
method forward (line 159) | def forward(self, x):
class Shared2FCBBoxHead (line 201) | class Shared2FCBBoxHead(ConvFCBBoxHead):
method __init__ (line 203) | def __init__(self, fc_out_channels=1024, *args, **kwargs):
class Shared4Conv1FCBBoxHead (line 217) | class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
method __init__ (line 219) | def __init__(self, fc_out_channels=1024, *args, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/dii_head.py
class DIIHead (line 18) | class DIIHead(BBoxHead):
method __init__ (line 46) | def __init__(self,
method init_weights (line 125) | def init_weights(self):
method forward (line 141) | def forward(self, roi_feat, proposal_feat):
method loss (line 201) | def loss(self,
method _get_target_single (line 285) | def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
method get_targets (line 359) | def get_targets(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py
class BasicResBlock (line 11) | class BasicResBlock(BaseModule):
method __init__ (line 26) | def __init__(self,
method forward (line 63) | def forward(self, x):
class DoubleConvFCBBoxHead (line 77) | class DoubleConvFCBBoxHead(BBoxHead):
method __init__ (line 91) | def __init__(self,
method _add_conv_branch (line 136) | def _add_conv_branch(self):
method _add_fc_branch (line 148) | def _add_fc_branch(self):
method forward (line 158) | def forward(self, x_cls, x_reg):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/sabl_head.py
class SABLHead (line 15) | class SABLHead(BaseModule):
method __init__ (line 58) | def __init__(self,
method custom_cls_channels (line 210) | def custom_cls_channels(self):
method custom_activation (line 214) | def custom_activation(self):
method custom_accuracy (line 218) | def custom_accuracy(self):
method _add_fc_branch (line 221) | def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
method cls_forward (line 230) | def cls_forward(self, cls_x):
method attention_pool (line 237) | def attention_pool(self, reg_x):
method side_aware_feature_extractor (line 250) | def side_aware_feature_extractor(self, reg_x):
method reg_pred (line 271) | def reg_pred(self, x, offset_fcs, cls_fcs):
method side_aware_split (line 289) | def side_aware_split(self, feat):
method bbox_pred_split (line 301) | def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
method reg_forward (line 310) | def reg_forward(self, reg_x):
method forward (line 329) | def forward(self, x):
method get_targets (line 336) | def get_targets(self, sampling_results, gt_bboxes, gt_labels,
method bucket_target (line 351) | def bucket_target(self,
method _bucket_target_single (line 377) | def _bucket_target_single(self, pos_proposals, neg_proposals,
method loss (line 440) | def loss(self,
method get_bboxes (line 485) | def get_bboxes(self,
method refine_bboxes (line 527) | def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
method regress_by_class (line 573) | def regress_by_class(self, rois, label, bbox_pred, img_meta):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py
class SCNetBBoxHead (line 7) | class SCNetBBoxHead(ConvFCBBoxHead):
method _forward_shared (line 14) | def _forward_shared(self, x):
method _forward_cls_reg (line 31) | def _forward_cls_reg(self, x):
method forward (line 59) | def forward(self, x, return_shared_feat=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/cascade_roi_head.py
class CascadeRoIHead (line 16) | class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
method __init__ (line 22) | def __init__(self,
method init_bbox_head (line 52) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
method init_mask_head (line 72) | def init_mask_head(self, mask_roi_extractor, mask_head):
method init_assigner_sampler (line 100) | def init_assigner_sampler(self):
method forward_dummy (line 112) | def forward_dummy(self, x, proposals):
method _bbox_forward (line 130) | def _bbox_forward(self, stage, x, rois):
method _bbox_forward_train (line 143) | def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
method _mask_forward (line 158) | def _mask_forward(self, stage, x, rois):
method _mask_forward_train (line 170) | def _mask_forward_train(self,
method forward_train (line 191) | def forward_train(self,
method simple_test (line 288) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
method aug_test (line 458) | def aug_test(self, features, proposal_list, img_metas, rescale=False):
method onnx_export (line 561) | def onnx_export(self, x, proposals, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/double_roi_head.py
class DoubleHeadRoIHead (line 7) | class DoubleHeadRoIHead(StandardRoIHead):
method __init__ (line 13) | def __init__(self, reg_roi_scale_factor, **kwargs):
method _bbox_forward (line 17) | def _bbox_forward(self, x, rois):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/dynamic_roi_head.py
class DynamicRoIHead (line 14) | class DynamicRoIHead(StandardRoIHead):
method __init__ (line 17) | def __init__(self, **kwargs):
method forward_train (line 25) | def forward_train(self,
method _bbox_forward_train (line 109) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
method update_hyperparameters (line 134) | def update_hyperparameters(self):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/grid_roi_head.py
class GridRoIHead (line 11) | class GridRoIHead(StandardRoIHead):
method __init__ (line 17) | def __init__(self, grid_roi_extractor, grid_head, **kwargs):
method _random_jitter (line 28) | def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
method forward_dummy (line 53) | def forward_dummy(self, x, proposals):
method _bbox_forward_train (line 79) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
method simple_test (line 117) | def simple_test(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/htc_roi_head.py
class HybridTaskCascadeRoIHead (line 14) | class HybridTaskCascadeRoIHead(CascadeRoIHead):
method __init__ (line 20) | def __init__(self,
method with_semantic (line 44) | def with_semantic(self):
method forward_dummy (line 51) | def forward_dummy(self, x, proposals):
method _bbox_forward_train (line 86) | def _bbox_forward_train(self,
method _mask_forward_train (line 113) | def _mask_forward_train(self,
method _bbox_forward (line 158) | def _bbox_forward(self, stage, x, rois, semantic_feat=None):
method _mask_forward_test (line 176) | def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
method forward_train (line 205) | def forward_train(self,
method simple_test (line 330) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
method aug_test (line 505) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
class CoarseMaskHead (line 10) | class CoarseMaskHead(FCNMaskHead):
method __init__ (line 26) | def __init__(self,
method init_weights (line 84) | def init_weights(self):
method forward (line 88) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
class DynamicMaskHead (line 14) | class DynamicMaskHead(FCNMaskHead):
method __init__ (line 42) | def __init__(self,
method init_weights (line 83) | def init_weights(self):
method forward (line 92) | def forward(self, roi_feat, proposal_feat):
method loss (line 125) | def loss(self, mask_pred, mask_targets, labels):
method get_targets (line 139) | def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
class FCNMaskHead (line 23) | class FCNMaskHead(BaseModule):
method __init__ (line 25) | def __init__(self,
method init_weights (line 115) | def init_weights(self):
method forward (line 128) | def forward(self, x):
method get_targets (line 138) | def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
method loss (line 148) | def loss(self, mask_pred, mask_targets, labels):
method get_seg_masks (line 179) | def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
method onnx_export (line 312) | def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
function _do_paste_mask (line 344) | def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
class FeatureRelayHead (line 9) | class FeatureRelayHead(BaseModule):
method __init__ (line 22) | def __init__(self,
method forward (line 43) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
class FusedSemanticHead (line 13) | class FusedSemanticHead(BaseModule):
method __init__ (line 29) | def __init__(self,
method forward (line 97) | def forward(self, feats):
method loss (line 115) | def loss(self, mask_pred, labels):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/global_context_head.py
class GlobalContextHead (line 11) | class GlobalContextHead(BaseModule):
method __init__ (line 29) | def __init__(self,
method forward (line 80) | def forward(self, feats):
method loss (line 94) | def loss(self, pred, labels):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/grid_head.py
class GridHead (line 13) | class GridHead(BaseModule):
method __init__ (line 15) | def __init__(self,
method forward (line 155) | def forward(self, x):
method calc_sub_regions (line 193) | def calc_sub_regions(self):
method get_targets (line 224) | def get_targets(self, sampling_results, rcnn_train_cfg):
method loss (line 292) | def loss(self, grid_pred, grid_targets):
method get_bboxes (line 298) | def get_bboxes(self, det_bboxes, grid_pred, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
class HTCMaskHead (line 9) | class HTCMaskHead(FCNMaskHead):
method __init__ (line 11) | def __init__(self, with_conv_res=True, *args, **kwargs):
method forward (line 22) | def forward(self, x, res_feat=None, return_logits=True, return_feat=Tr...
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/mask_point_head.py
class MaskPointHead (line 16) | class MaskPointHead(BaseModule):
method __init__ (line 42) | def __init__(self,
method forward (line 88) | def forward(self, fine_grained_feats, coarse_feats):
method get_targets (line 109) | def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
method _get_target_single (line 149) | def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
method loss (line 167) | def loss(self, point_pred, point_targets, labels):
method get_roi_rel_points_train (line 190) | def get_roi_rel_points_train(self, mask_pred, labels, cfg):
method get_roi_rel_points_test (line 215) | def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/maskiou_head.py
class MaskIoUHead (line 13) | class MaskIoUHead(BaseModule):
method __init__ (line 19) | def __init__(self,
method forward (line 73) | def forward(self, mask_feat, mask_pred):
method loss (line 88) | def loss(self, mask_iou_pred, mask_iou_targets):
method get_targets (line 98) | def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targ...
method _get_area_ratio (line 147) | def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
method get_mask_scores (line 174) | def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
class SCNetMaskHead (line 8) | class SCNetMaskHead(FCNMaskHead):
method __init__ (line 16) | def __init__(self, conv_to_res=True, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
class SCNetSemanticHead (line 8) | class SCNetSemanticHead(FusedSemanticHead):
method __init__ (line 16) | def __init__(self, conv_to_res=True, **kwargs):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/mask_scoring_roi_head.py
class MaskScoringRoIHead (line 10) | class MaskScoringRoIHead(StandardRoIHead):
method __init__ (line 16) | def __init__(self, mask_iou_head, **kwargs):
method _mask_forward_train (line 21) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
method simple_test_mask (line 49) | def simple_test_mask(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/pisa_roi_head.py
class PISARoIHead (line 9) | class PISARoIHead(StandardRoIHead):
method forward_train (line 13) | def forward_train(self,
method _bbox_forward (line 87) | def _bbox_forward(self, x, rois):
method _bbox_forward_train (line 100) | def _bbox_forward_train(self,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/point_rend_roi_head.py
class PointRendRoIHead (line 18) | class PointRendRoIHead(StandardRoIHead):
method __init__ (line 21) | def __init__(self, point_head, *args, **kwargs):
method init_point_head (line 26) | def init_point_head(self, point_head):
method _mask_forward_train (line 30) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
method _mask_point_forward_train (line 45) | def _mask_point_forward_train(self, x, sampling_results, mask_pred,
method _get_fine_grained_point_feats (line 66) | def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
method _mask_point_forward_test (line 104) | def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
method simple_test_mask (line 155) | def simple_test_mask(self,
method aug_test_mask (line 217) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
method _onnx_get_fine_grained_point_feats (line 250) | def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points):
method _mask_point_onnx_export (line 284) | def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred):
method mask_onnx_export (line 351) | def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwa...
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py
class BaseRoIExtractor (line 10) | class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
method __init__ (line 21) | def __init__(self,
method num_inputs (line 33) | def num_inputs(self):
method build_roi_layers (line 37) | def build_roi_layers(self, layer_cfg, featmap_strides):
method roi_rescale (line 62) | def roi_rescale(self, rois, scale_factor):
method forward (line 87) | def forward(self, feats, rois, roi_scale_factor=None):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py
class GenericRoIExtractor (line 10) | class GenericRoIExtractor(BaseRoIExtractor):
method __init__ (line 25) | def __init__(self,
method forward (line 44) | def forward(self, feats, rois, roi_scale_factor=None):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py
class SingleRoIExtractor (line 10) | class SingleRoIExtractor(BaseRoIExtractor):
method __init__ (line 26) | def __init__(self,
method map_roi_levels (line 36) | def map_roi_levels(self, rois, num_levels):
method forward (line 58) | def forward(self, feats, rois, roi_scale_factor=None):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/scnet_roi_head.py
class SCNetRoIHead (line 14) | class SCNetRoIHead(CascadeRoIHead):
method __init__ (line 26) | def __init__(self,
method init_mask_head (line 50) | def init_mask_head(self, mask_roi_extractor, mask_head):
method with_semantic (line 57) | def with_semantic(self):
method with_feat_relay (line 63) | def with_feat_relay(self):
method with_glbctx (line 69) | def with_glbctx(self):
method _fuse_glbctx (line 73) | def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
method _slice_pos_feats (line 83) | def _slice_pos_feats(self, feats, sampling_results):
method _bbox_forward (line 96) | def _bbox_forward(self,
method _mask_forward (line 125) | def _mask_forward(self,
method _bbox_forward_train (line 150) | def _bbox_forward_train(self,
method _mask_forward_train (line 179) | def _mask_forward_train(self,
method forward_train (line 206) | def forward_train(self,
method simple_test (line 314) | def simple_test(self, x, proposal_list, img_metas, rescale=False):
method aug_test (line 487) | def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/shared_heads/res_layer.py
class ResLayer (line 13) | class ResLayer(BaseModule):
method __init__ (line 15) | def __init__(self,
method forward (line 70) | def forward(self, x):
method train (line 75) | def train(self, mode=True):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/sparse_roi_head.py
class SparseRoIHead (line 12) | class SparseRoIHead(CascadeRoIHead):
method __init__ (line 37) | def __init__(self,
method _bbox_forward (line 88) | def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
method _mask_forward (line 151) | def _mask_forward(self, stage, x, rois, attn_feats):
method _mask_forward_train (line 163) | def _mask_forward_train(self, stage, x, attn_feats, sampling_results,
method forward_train (line 184) | def forward_train(self,
method simple_test (line 276) | def simple_test(self,
method aug_test (line 400) | def aug_test(self, features, proposal_list, img_metas, rescale=False):
method forward_dummy (line 404) | def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/standard_roi_head.py
class StandardRoIHead (line 11) | class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
method init_assigner_sampler (line 14) | def init_assigner_sampler(self):
method init_bbox_head (line 23) | def init_bbox_head(self, bbox_roi_extractor, bbox_head):
method init_mask_head (line 28) | def init_mask_head(self, mask_roi_extractor, mask_head):
method forward_dummy (line 38) | def forward_dummy(self, x, proposals):
method forward_train (line 54) | def forward_train(self,
method _bbox_forward (line 118) | def _bbox_forward(self, x, rois):
method _bbox_forward_train (line 131) | def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
method _mask_forward_train (line 146) | def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
method _mask_forward (line 181) | def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
method async_simple_test (line 198) | async def async_simple_test(self,
method simple_test (line 223) | def simple_test(self,
method aug_test (line 269) | def aug_test(self, x, proposal_list, img_metas, rescale=False):
method onnx_export (line 295) | def onnx_export(self, x, proposals, img_metas, rescale=False):
method mask_onnx_export (line 308) | def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwa...
method bbox_onnx_export (line 350) | def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/test_mixins.py
class BBoxTestMixin (line 15) | class BBoxTestMixin:
method async_test_bboxes (line 19) | async def async_test_bboxes(self,
method simple_test_bboxes (line 51) | def simple_test_bboxes(self,
method aug_test_bboxes (line 138) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...
class MaskTestMixin (line 179) | class MaskTestMixin:
method async_test_mask (line 183) | async def async_test_mask(self,
method simple_test_mask (line 224) | def simple_test_mask(self,
method aug_test_mask (line 281) | def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
FILE: DLTA_AI_app/mmdetection/mmdet/models/roi_heads/trident_roi_head.py
class TridentRoIHead (line 12) | class TridentRoIHead(StandardRoIHead):
method __init__ (line 22) | def __init__(self, num_branch, test_branch_idx, **kwargs):
method merge_trident_bboxes (line 27) | def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
method simple_test (line 46) | def simple_test(self,
method aug_test_bboxes (line 83) | def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_c...
FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/base_semantic_head.py
class BaseSemanticHead (line 11) | class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
method __init__ (line 20) | def __init__(self,
method loss (line 32) | def loss(self, seg_preds, gt_semantic_seg):
method forward (line 55) | def forward(self, x):
method forward_train (line 65) | def forward_train(self, x, gt_semantic_seg):
method simple_test (line 70) | def simple_test(self, x, img_metas, rescale=False):
FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fpn_head.py
class PanopticFPNHead (line 14) | class PanopticFPNHead(BaseSemanticHead):
method __init__ (line 52) | def __init__(self,
method _set_things_to_void (line 108) | def _set_things_to_void(self, gt_semantic_seg):
method loss (line 129) | def loss(self, seg_preds, gt_semantic_seg):
method init_weights (line 137) | def init_weights(self):
method forward (line 142) | def forward(self, x):
FILE: DLTA_AI_app/mmdetection/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py
class BasePanopticFusionHead (line 9)
Condensed preview — 1415 files, each showing path, character count, and a content snippet. Download the .json file or copy for the full structured content (6,465K chars).
[
{
"path": ".github/workflows/retype-action.yml",
"chars": 423,
"preview": "name: Publish Retype powered website to GitHub Pages\non:\n workflow_dispatch:\n push:\n branches:\n - master\n\njobs"
},
{
"path": ".gitignore",
"chars": 80,
"preview": "\n*.pt\n*.pth\n# anything in the folders : datasets , runs\nruns/\n*.lnk\n*.ps1\n*.docx"
},
{
"path": "DLTA_AI_app/.flake8",
"chars": 64,
"preview": "[flake8]\nexclude = .anaconda3/*\nignore = E203, E741, W503, W504\n"
},
{
"path": "DLTA_AI_app/.gitignore",
"chars": 167,
"preview": "/.cache/\n/.pytest_cache/\n\n/build/\n/dist/\n/*.egg-info/\n\n*.py[cdo]\n\n.DS_Store\n.idea/\n\n# mp4\n*.mp4\n\n# any thing in the fold"
},
{
"path": "DLTA_AI_app/.gitmodules",
"chars": 97,
"preview": "[submodule \"github2pypi\"]\n\tpath = github2pypi\n\turl = https://github.com/wkentaro/github2pypi.git\n"
},
{
"path": "DLTA_AI_app/__main__.py",
"chars": 2245,
"preview": "import os\nimport sys\n\nsys.path.append(os.path.dirname(os.path.realpath(__file__)))\nos.chdir(os.path.dirname(os.path.real"
},
{
"path": "DLTA_AI_app/__main__.spec",
"chars": 1359,
"preview": "# -*- mode: python -*-\n# vim: ft=python\n\nfrom glob import glob\n\n\nblock_cipher = None\n\ndatas_list = [ \n ('models_menu/"
},
{
"path": "DLTA_AI_app/inferencing.py",
"chars": 14540,
"preview": "\nimport copy\nfrom supervision.detection.core import Detections\nfrom time import time\nimport torch\nfrom mmdet.apis import"
},
{
"path": "DLTA_AI_app/labelme/__init__.py",
"chars": 602,
"preview": "# flake8: noqa\n\nimport logging\nimport sys\n\nfrom qtpy import QT_VERSION\n\n\n__appname__ = \"DLTA-AI\"\n\n# Semantic Versioning "
},
{
"path": "DLTA_AI_app/labelme/app.py",
"chars": 207004,
"preview": "# -*- coding: utf-8 -*-\nimport functools\nimport json\nimport math\nimport re\nimport copy\nimport imgviz\nimport torch\nimport"
},
{
"path": "DLTA_AI_app/labelme/cli/__init__.py",
"chars": 123,
"preview": "# flake8: noqa\n\nfrom . import draw_json\nfrom . import draw_label_png\nfrom . import json_to_dataset\nfrom . import on_dock"
},
{
"path": "DLTA_AI_app/labelme/cli/draw_json.py",
"chars": 1355,
"preview": "#!/usr/bin/env python\n\nimport argparse\nimport sys\n\nimport imgviz\nimport matplotlib.pyplot as plt\n\nfrom labelme.label_fil"
},
{
"path": "DLTA_AI_app/labelme/cli/draw_label_png.py",
"chars": 636,
"preview": "import argparse\n\nimport imgviz\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport PIL.Image\n\nfrom labelme.logger "
},
{
"path": "DLTA_AI_app/labelme/cli/json_to_dataset.py",
"chars": 2398,
"preview": "import argparse\nimport base64\nimport json\nimport os\nimport os.path as osp\n\nimport imgviz\nimport PIL.Image\n\nfrom labelme."
},
{
"path": "DLTA_AI_app/labelme/cli/on_docker.py",
"chars": 2749,
"preview": "#!/usr/bin/env python\n\nfrom __future__ import print_function\n\nimport argparse\nimport distutils.spawn\nimport json\nimport "
},
{
"path": "DLTA_AI_app/labelme/config/__init__.py",
"chars": 2698,
"preview": "import os.path as osp\nimport shutil\n\nimport yaml\n\nfrom labelme.logger import logger\n\n\nhere = osp.dirname(osp.abspath(__f"
},
{
"path": "DLTA_AI_app/labelme/config/default_config.yaml",
"chars": 3482,
"preview": "auto_save: false\r\ncanvas:\r\n double_click: close\r\n num_backups: 10\r\ndefault_classes:\r\n- person\r\n- bicycle\r\n- car\r\n- mot"
},
{
"path": "DLTA_AI_app/labelme/config/default_config_base.yaml",
"chars": 3255,
"preview": "auto_save: false\ncanvas:\n double_click: close\n num_backups: 10\ndefault_classes:\n- person\n- bicycle\n- car\n- motorcycle\n"
},
{
"path": "DLTA_AI_app/labelme/intelligence.py",
"chars": 14925,
"preview": "from ultralytics import YOLO\nimport json\nimport time\ntry:\n from inferencing import models_inference\nexcept ModuleNotF"
},
{
"path": "DLTA_AI_app/labelme/label_file.py",
"chars": 6474,
"preview": "import base64\nimport contextlib\nimport io\nimport json\nimport os.path as osp\n\nimport PIL.Image\n\nfrom labelme import __ver"
},
{
"path": "DLTA_AI_app/labelme/logger.py",
"chars": 1841,
"preview": "import datetime\nimport logging\nimport os\n\nimport termcolor\n\nif os.name == \"nt\": # Windows\n import colorama\n\n colo"
},
{
"path": "DLTA_AI_app/labelme/shape.py",
"chars": 9394,
"preview": "import copy\nimport math\n\nfrom PyQt6 import QtCore\nfrom PyQt6 import QtGui\n\nimport labelme.utils\n\n\n# TODO(unknown):\n# - ["
},
{
"path": "DLTA_AI_app/labelme/testing.py",
"chars": 849,
"preview": "import json\nimport os.path as osp\n\nimport imgviz\nimport labelme.utils\n\n\ndef assert_labelfile_sanity(filename):\n asser"
},
{
"path": "DLTA_AI_app/labelme/utils/__init__.py",
"chars": 1093,
"preview": "# flake8: noqa\n\nfrom ._io import lblsave\n\nfrom .image import apply_exif_orientation\nfrom .image import img_arr_to_b64\nfr"
},
{
"path": "DLTA_AI_app/labelme/utils/_io.py",
"chars": 675,
"preview": "import os.path as osp\n\nimport numpy as np\nimport PIL.Image\n\n\ndef lblsave(filename, lbl):\n import imgviz\n\n if osp.s"
},
{
"path": "DLTA_AI_app/labelme/utils/custom_exports.py",
"chars": 5156,
"preview": "# Don't Modify These Lines\n# =========================================\ncustom_exports_list = []\n\n# custom export class b"
},
{
"path": "DLTA_AI_app/labelme/utils/export.py",
"chars": 16841,
"preview": "import datetime\nimport glob\nimport json\nimport os\nimport csv\nimport numpy as np\nfrom PyQt6.QtWidgets import QFileDialog\n"
},
{
"path": "DLTA_AI_app/labelme/utils/helpers/mathOps.py",
"chars": 34020,
"preview": "import numpy as np\nimport random\nimport cv2\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtCore\nfrom labelme import PY2\nimp"
},
{
"path": "DLTA_AI_app/labelme/utils/helpers/visualizations.py",
"chars": 12954,
"preview": "import numpy as np\nimport cv2\nfrom .mathOps import *\n\n\ncoco_classes = ['person', 'bicycle', 'car', 'motorcycle', 'airpla"
},
{
"path": "DLTA_AI_app/labelme/utils/image.py",
"chars": 2367,
"preview": "import base64\nimport io\n\nimport numpy as np\nimport PIL.ExifTags\nimport PIL.Image\nimport PIL.ImageOps\n\n\ndef img_data_to_p"
},
{
"path": "DLTA_AI_app/labelme/utils/model_explorer.py",
"chars": 14301,
"preview": "from PyQt6 import QtWidgets, QtCore, QtGui\nfrom PyQt6.QtWidgets import QDialog, QToolBar, QTableWidget, QTableWidgetItem"
},
{
"path": "DLTA_AI_app/labelme/utils/qt.py",
"chars": 2407,
"preview": "from math import sqrt\nimport os.path as osp\n\nimport numpy as np\n\nfrom PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom P"
},
{
"path": "DLTA_AI_app/labelme/utils/sam.py",
"chars": 9840,
"preview": "from segment_anything import sam_model_registry, SamPredictor, SamAutomaticMaskGenerator\nimport numpy as np\nimport torch"
},
{
"path": "DLTA_AI_app/labelme/utils/shape.py",
"chars": 3650,
"preview": "import math\nimport uuid\n\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageDraw\n\nfrom labelme.logger import logger\n\n\nd"
},
{
"path": "DLTA_AI_app/labelme/utils/vid_to_frames.py",
"chars": 15555,
"preview": "import os\nimport sys\nimport cv2\nfrom PyQt6.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QFileDialog, QSl"
},
{
"path": "DLTA_AI_app/labelme/widgets/ClassesWidget.py",
"chars": 1771,
"preview": "\nfrom PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\n# add ClassWidget and allow the user to s"
},
{
"path": "DLTA_AI_app/labelme/widgets/MsgBox.py",
"chars": 1175,
"preview": "from PyQt6 import QtWidgets\n\n\ndef OKmsgBox(title, text, type = \"info\", turnResult = False):\n \n \"\"\"\n Show a mess"
},
{
"path": "DLTA_AI_app/labelme/widgets/ThresholdWidget.py",
"chars": 571,
"preview": "from PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\n\nclass ThresholdWidget(QtWidgets.QDialog):"
},
{
"path": "DLTA_AI_app/labelme/widgets/__init__.py",
"chars": 458,
"preview": "# flake8: noqa\n\nfrom .brightness_contrast_dialog import BrightnessContrastDialog\n\nfrom .canvas import Canvas\n\nfrom .colo"
},
{
"path": "DLTA_AI_app/labelme/widgets/brightness_contrast_dialog.py",
"chars": 1483,
"preview": "import PIL.Image\nimport PIL.ImageEnhance\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets"
},
{
"path": "DLTA_AI_app/labelme/widgets/canvas.py",
"chars": 36585,
"preview": "from PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\nfrom labelme import QT5\nfrom labelme.shape"
},
{
"path": "DLTA_AI_app/labelme/widgets/check_updates_UI.py",
"chars": 2677,
"preview": "from labelme.widgets.links import open_release\nfrom bs4 import BeautifulSoup\nimport requests\nfrom PyQt6.QtWidgets import"
},
{
"path": "DLTA_AI_app/labelme/widgets/color_dialog.py",
"chars": 1272,
"preview": "from PyQt6 import QtWidgets\n\n\nclass ColorDialog(QtWidgets.QColorDialog):\n def __init__(self, parent=None):\n su"
},
{
"path": "DLTA_AI_app/labelme/widgets/deleteSelectedShape_UI.py",
"chars": 4326,
"preview": "from PyQt6 import QtCore\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\n\n\ndef PopUp(TOTAL_VIDEO_FRAMES, INDEX_O"
},
{
"path": "DLTA_AI_app/labelme/widgets/editLabel_videoMode.py",
"chars": 11590,
"preview": "from PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\nfrom labelme.widgets.MsgBox import OKmsgBox\nfrom labelme.utils.h"
},
{
"path": "DLTA_AI_app/labelme/widgets/escapable_qlist_widget.py",
"chars": 287,
"preview": "from PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\n\n\nclass EscapableQListWidget(QtWidgets.QListWidget):\n def key"
},
{
"path": "DLTA_AI_app/labelme/widgets/exportData_UI.py",
"chars": 5371,
"preview": "from PyQt6.QtCore import Qt\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\nfrom labelme.widgets import open_file\n\nt"
},
{
"path": "DLTA_AI_app/labelme/widgets/feedback_UI.py",
"chars": 972,
"preview": "from labelme.widgets.links import open_issue\nfrom PyQt6.QtWidgets import QMessageBox\nfrom PyQt6.QtCore import Qt\n\n\ndef P"
},
{
"path": "DLTA_AI_app/labelme/widgets/getIDfromUser_UI.py",
"chars": 2078,
"preview": "from PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\nfrom .MsgBox import OKmsgBox\nfrom labelme.utils.helpers.mathOps "
},
{
"path": "DLTA_AI_app/labelme/widgets/interpolation_UI.py",
"chars": 3567,
"preview": "from PyQt6 import QtCore\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\n\n\ndef PopUp(co"
},
{
"path": "DLTA_AI_app/labelme/widgets/label_dialog.py",
"chars": 10840,
"preview": "import re\n\nfrom qtpy import QT_VERSION\nfrom PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\nfro"
},
{
"path": "DLTA_AI_app/labelme/widgets/label_list_widget.py",
"chars": 6081,
"preview": "from PyQt6 import QtCore\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtGui\nfrom PyQt6.QtGui import QPalette\nfrom PyQt6"
},
{
"path": "DLTA_AI_app/labelme/widgets/links.py",
"chars": 1754,
"preview": "import webbrowser\n\ndef open_git_hub():\n \"\"\"\n Opens the GitHub repository for the DLTA-AI project in the default we"
},
{
"path": "DLTA_AI_app/labelme/widgets/merge_feature_UI.py",
"chars": 2952,
"preview": "import json\nfrom PyQt6 import QtWidgets\nfrom PyQt6 import QtCore\n\n\n# create an interface for merging features\nclass Merg"
},
{
"path": "DLTA_AI_app/labelme/widgets/notification.py",
"chars": 785,
"preview": "import os\n\ndef PopUp(text):\n \"\"\"\n Sends a desktop notification with the given text.\n\n Args:\n text (str):"
},
{
"path": "DLTA_AI_app/labelme/widgets/open_file.py",
"chars": 869,
"preview": "import os\nimport subprocess\nimport platform\n\n\n\n\ndef PopUp():\n \"\"\"\n Open a file with the default application for th"
},
{
"path": "DLTA_AI_app/labelme/widgets/preferences_UI.py",
"chars": 4772,
"preview": "import yaml\nfrom PyQt6 import QtWidgets, QtGui, QtCore\n\n\n\n\n\ndef PopUp():\n \"\"\"\n\n Description:\n This function dis"
},
{
"path": "DLTA_AI_app/labelme/widgets/runtime_data_UI.py",
"chars": 2490,
"preview": "from PyQt6.QtWidgets import QDialog, QLabel, QVBoxLayout\nfrom PyQt6.QtGui import QFont\nfrom PyQt6 import QtCore\nimport p"
},
{
"path": "DLTA_AI_app/labelme/widgets/scaleObject_UI.py",
"chars": 2698,
"preview": "from PyQt6 import QtCore\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\nfrom labelme.utils.helpers.mathOps impo"
},
{
"path": "DLTA_AI_app/labelme/widgets/segmentation_options_UI.py",
"chars": 13647,
"preview": "# relevant imports for the functions\nfrom PyQt6 import QtCore\nfrom PyQt6 import QtWidgets\nimport yaml\nfrom ..utils.helpe"
},
{
"path": "DLTA_AI_app/labelme/widgets/shortcut_selector_UI.py",
"chars": 7025,
"preview": "import yaml\nfrom PyQt6 import QtWidgets, QtGui, QtCore\n\n\ndef PopUp():\n \"\"\"\n Displays a dialog box for selecting an"
},
{
"path": "DLTA_AI_app/labelme/widgets/tool_bar.py",
"chars": 997,
"preview": "from PyQt6 import QtCore\nfrom PyQt6 import QtWidgets\n\n\nclass ToolBar(QtWidgets.QToolBar):\n def __init__(self, title):"
},
{
"path": "DLTA_AI_app/labelme/widgets/unique_label_qlist_widget.py",
"chars": 1284,
"preview": "# -*- encoding: utf-8 -*-\n\nfrom PyQt6.QtCore import Qt\nfrom PyQt6 import QtWidgets\n\nfrom .escapable_qlist_widget import "
},
{
"path": "DLTA_AI_app/labelme/widgets/zoom_widget.py",
"chars": 743,
"preview": "from PyQt6 import QtCore\nfrom PyQt6 import QtGui\nfrom PyQt6 import QtWidgets\n\n\nclass ZoomWidget(QtWidgets.QSpinBox):\n "
},
{
"path": "DLTA_AI_app/mmdetection/.circleci/config.yml",
"chars": 5280,
"preview": "version: 2.1\n\njobs:\n lint:\n docker:\n - image: cimg/python:3.7.4\n steps:\n - checkout\n - run:\n "
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/batch_test_list.py",
"chars": 12707,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\n# yapf: disable\natss = dict(\n config='configs/atss/atss_r50_fpn_1x_co"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/batch_train_list.txt",
"chars": 3429,
"preview": "configs/atss/atss_r50_fpn_1x_coco.py\nconfigs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py\nconfigs/cascade_rcnn/cascade_m"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/benchmark_filter.py",
"chars": 7106,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\n\ndef parse_args():\n "
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/benchmark_inference_fps.py",
"chars": 6764,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\nimport mmcv\nfrom mmcv i"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/benchmark_test_image.py",
"chars": 3674,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport logging\nimport os.path as osp\nfrom argparse import ArgumentParser"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/check_links.py",
"chars": 5049,
"preview": "# Modified from:\n# https://github.com/allenai/allennlp/blob/main/scripts/check_links.py\n\nimport argparse\nimport logging\n"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/convert_test_benchmark_script.py",
"chars": 3604,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\nfrom mmcv import Config"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/convert_train_benchmark_script.py",
"chars": 3307,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport os\nimport os.path as osp\n\n\ndef parse_args():\n "
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/gather_models.py",
"chars": 12397,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport json\nimport os.path as osp\nimport shu"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/gather_test_benchmark_metric.py",
"chars": 3916,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport os.path as osp\n\nimport mmcv\nfrom mmcv"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/gather_train_benchmark_metric.py",
"chars": 5843,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\nimport argparse\nimport glob\nimport os.path as osp\n\nimport mmcv\nfrom gath"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/linter.sh",
"chars": 90,
"preview": "yapf -r -i mmdet/ configs/ tests/ tools/\nisort -rc mmdet/ configs/ tests/ tools/\nflake8 .\n"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/test_benchmark.sh",
"chars": 23366,
"preview": "PARTITION=$1\nCHECKPOINT_DIR=$2\n\necho 'configs/atss/atss_r50_fpn_1x_coco.py' &\nGPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 t"
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/test_init_backbone.py",
"chars": 6625,
"preview": "# Copyright (c) OpenMMLab. All rights reserved.\n\"\"\"Check out backbone whether successfully load pretrained checkpoint.\"\""
},
{
"path": "DLTA_AI_app/mmdetection/.dev_scripts/train_benchmark.sh",
"chars": 22182,
"preview": "echo 'configs/atss/atss_r50_fpn_1x_coco.py' &\nGPUS=8 GPUS_PER_NODE=8 CPUS_PER_TASK=2 ./tools/slurm_train.sh openmmlab "
},
{
"path": "DLTA_AI_app/mmdetection/.gitignore",
"chars": 1378,
"preview": "# Byte-compiled / optimized / DLL files\n__pycache__/\n*.py[cod]\n*$py.class\n\n# C extensions\n*.so\n\n# Distribution / packagi"
},
{
"path": "DLTA_AI_app/mmdetection/.owners.yml",
"chars": 200,
"preview": "assign:\n strategy:\n # random\n daily-shift-based\n scedule: \"*/1 * * * *\"\n assignees:\n - Czm369\n - hhaAndro"
},
{
"path": "DLTA_AI_app/mmdetection/.pre-commit-config.yaml",
"chars": 1452,
"preview": "repos:\n - repo: https://github.com/PyCQA/flake8\n rev: 5.0.4\n hooks:\n - id: flake8\n - repo: https://github.c"
},
{
"path": "DLTA_AI_app/mmdetection/.readthedocs.yml",
"chars": 151,
"preview": "version: 2\n\nformats: all\n\npython:\n version: 3.7\n install:\n - requirements: requirements/docs.txt\n - requirements"
},
{
"path": "DLTA_AI_app/mmdetection/CITATION.cff",
"chars": 273,
"preview": "cff-version: 1.2.0\nmessage: \"If you use this software, please cite it as below.\"\nauthors:\n - name: \"MMDetection Contrib"
},
{
"path": "DLTA_AI_app/mmdetection/LICENSE",
"chars": 11398,
"preview": "Copyright 2018-2023 OpenMMLab. All rights reserved.\n\n Apache License\n "
},
{
"path": "DLTA_AI_app/mmdetection/MANIFEST.in",
"chars": 205,
"preview": "include requirements/*.txt\ninclude mmdet/VERSION\ninclude mmdet/.mim/model-index.yml\ninclude mmdet/.mim/demo/*/*\nrecursiv"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/cityscapes_detection.py",
"chars": 1937,
"preview": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123."
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/cityscapes_instance.py",
"chars": 1968,
"preview": "# dataset settings\ndataset_type = 'CityscapesDataset'\ndata_root = 'data/cityscapes/'\nimg_norm_cfg = dict(\n mean=[123."
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_detection.py",
"chars": 1711,
"preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28,"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_instance.py",
"chars": 1737,
"preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28,"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_instance_semantic.py",
"chars": 1922,
"preview": "# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28,"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/coco_panoptic.py",
"chars": 2079,
"preview": "# dataset settings\ndataset_type = 'CocoPanopticDataset'\ndata_root = 'data/coco/'\nimg_norm_cfg = dict(\n mean=[123.675,"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/deepfashion.py",
"chars": 1888,
"preview": "# dataset settings\ndataset_type = 'DeepFashionDataset'\ndata_root = 'data/DeepFashion/In-shop/'\nimg_norm_cfg = dict(\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/lvis_v0.5_instance.py",
"chars": 786,
"preview": "# dataset settings\n_base_ = 'coco_instance.py'\ndataset_type = 'LVISV05Dataset'\ndata_root = 'data/lvis_v0.5/'\ndata = dict"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/lvis_v1_instance.py",
"chars": 736,
"preview": "# dataset settings\n_base_ = 'coco_instance.py'\ndataset_type = 'LVISV1Dataset'\ndata_root = 'data/lvis_v1/'\ndata = dict(\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/openimages_detection.py",
"chars": 2731,
"preview": "# dataset settings\ndataset_type = 'OpenImagesDataset'\ndata_root = 'data/OpenImages/'\nimg_norm_cfg = dict(\n mean=[123."
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/voc0712.py",
"chars": 1916,
"preview": "# dataset settings\ndataset_type = 'VOCDataset'\ndata_root = 'data/VOCdevkit/'\nimg_norm_cfg = dict(\n mean=[123.675, 116"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/datasets/wider_face.py",
"chars": 2011,
"preview": "# dataset settings\ndataset_type = 'WIDERFaceDataset'\ndata_root = 'data/WIDERFace/'\nimg_norm_cfg = dict(mean=[123.675, 11"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/default_runtime.py",
"chars": 791,
"preview": "checkpoint_config = dict(interval=1)\n# yapf:disable\nlog_config = dict(\n interval=50,\n hooks=[\n dict(type='T"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/cascade_mask_rcnn_r50_fpn.py",
"chars": 6950,
"preview": "# model settings\nmodel = dict(\n type='CascadeRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/cascade_rcnn_r50_fpn.py",
"chars": 6325,
"preview": "# model settings\nmodel = dict(\n type='CascadeRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/fast_rcnn_r50_fpn.py",
"chars": 2060,
"preview": "# model settings\nmodel = dict(\n type='FastRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_c4.py",
"chars": 3827,
"preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n type='FasterRCNN',\n backbone=dict("
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_caffe_dc5.py",
"chars": 3479,
"preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n type='FasterRCNN',\n backbone=dict("
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/faster_rcnn_r50_fpn.py",
"chars": 3632,
"preview": "# model settings\nmodel = dict(\n type='FasterRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/mask_rcnn_r50_caffe_c4.py",
"chars": 4061,
"preview": "# model settings\nnorm_cfg = dict(type='BN', requires_grad=False)\nmodel = dict(\n type='MaskRCNN',\n backbone=dict(\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/mask_rcnn_r50_fpn.py",
"chars": 4054,
"preview": "# model settings\nmodel = dict(\n type='MaskRCNN',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/retinanet_r50_fpn.py",
"chars": 1767,
"preview": "# model settings\nmodel = dict(\n type='RetinaNet',\n backbone=dict(\n type='ResNet',\n depth=50,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/rpn_r50_caffe_c4.py",
"chars": 1788,
"preview": "# model settings\nmodel = dict(\n type='RPN',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_s"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/rpn_r50_fpn.py",
"chars": 1807,
"preview": "# model settings\nmodel = dict(\n type='RPN',\n backbone=dict(\n type='ResNet',\n depth=50,\n num_s"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/models/ssd300.py",
"chars": 1734,
"preview": "# model settings\ninput_size = 300\nmodel = dict(\n type='SingleStageDetector',\n backbone=dict(\n type='SSDVGG'"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_1x.py",
"chars": 319,
"preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_20e.py",
"chars": 320,
"preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
},
{
"path": "DLTA_AI_app/mmdetection/configs/_base_/schedules/schedule_2x.py",
"chars": 320,
"preview": "# optimizer\noptimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)\noptimizer_config = dict(grad_clip=N"
},
{
"path": "DLTA_AI_app/mmdetection/configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py",
"chars": 2276,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nimg_norm_cfg = dict(\n mean=[123.675, 116.28, 103.53], std=[58.39"
},
{
"path": "DLTA_AI_app/mmdetection/configs/atss/atss_r101_fpn_1x_coco.py",
"chars": 192,
"preview": "_base_ = './atss_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg=dict(type='Pre"
},
{
"path": "DLTA_AI_app/mmdetection/configs/atss/atss_r50_fpn_1x_coco.py",
"chars": 1925,
"preview": "_base_ = [\n '../_base_/datasets/coco_detection.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
},
{
"path": "DLTA_AI_app/mmdetection/configs/atss/metafile.yml",
"chars": 1772,
"preview": "Collections:\n - Name: ATSS\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with Moment"
},
{
"path": "DLTA_AI_app/mmdetection/configs/autoassign/autoassign_r50_fpn_8x2_1x_coco.py",
"chars": 2672,
"preview": "# We follow the original implementation which\n# adopts the Caffe pre-trained backbone.\n_base_ = [\n '../_base_/dataset"
},
{
"path": "DLTA_AI_app/mmdetection/configs/autoassign/metafile.yml",
"chars": 1056,
"preview": "Collections:\n - Name: AutoAssign\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with "
},
{
"path": "DLTA_AI_app/mmdetection/configs/carafe/faster_rcnn_r50_fpn_carafe_1x_coco.py",
"chars": 1640,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n neck=dict(\n type='FPN_CARAFE',\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py",
"chars": 1971,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n neck=dict(\n type='FPN_CARAFE',\n in_"
},
{
"path": "DLTA_AI_app/mmdetection/configs/carafe/metafile.yml",
"chars": 1757,
"preview": "Collections:\n - Name: CARAFE\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with Mome"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_1x_coco.py",
"chars": 230,
"preview": "_base_ = './cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_caffe_fpn_mstrain_3x_coco.py",
"chars": 238,
"preview": "_base_ = './cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py",
"chars": 205,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg=d"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_20e_coco.py",
"chars": 206,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg="
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r101_fpn_mstrain_3x_coco.py",
"chars": 213,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n in"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_1x_coco.py",
"chars": 1426,
"preview": "_base_ = ['./cascade_mask_rcnn_r50_fpn_1x_coco.py']\n\nmodel = dict(\n backbone=dict(\n norm_cfg=dict(requires_gra"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_caffe_fpn_mstrain_3x_coco.py",
"chars": 1631,
"preview": "_base_ = ['./cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py']\nmodel = dict(\n backbone=dict(\n norm_cfg=dict(requi"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py",
"chars": 182,
"preview": "_base_ = [\n '../_base_/models/cascade_mask_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_instance.py',\n '../_base"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py",
"chars": 183,
"preview": "_base_ = [\n '../_base_/models/cascade_mask_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_instance.py',\n '../_base"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py",
"chars": 110,
"preview": "_base_ = [\n '../common/mstrain_3x_coco_instance.py',\n '../_base_/models/cascade_mask_rcnn_r50_fpn.py'\n]\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py",
"chars": 427,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n depth"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_20e_coco.py",
"chars": 428,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n dept"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py",
"chars": 435,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py",
"chars": 1878,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'\n\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_1x_coco.py",
"chars": 427,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n depth"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py",
"chars": 428,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n dept"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py",
"chars": 435,
"preview": "_base_ = './cascade_mask_rcnn_r50_fpn_mstrain_3x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_caffe_fpn_1x_coco.py",
"chars": 225,
"preview": "_base_ = './cascade_rcnn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg="
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py",
"chars": 200,
"preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg=dict(t"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r101_fpn_20e_coco.py",
"chars": 201,
"preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n backbone=dict(\n depth=101,\n init_cfg=dict("
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_caffe_fpn_1x_coco.py",
"chars": 1389,
"preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\n\nmodel = dict(\n backbone=dict(\n norm_cfg=dict(requires_grad=False"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py",
"chars": 178,
"preview": "_base_ = [\n '../_base_/models/cascade_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_detection.py',\n '../_base_/sc"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py",
"chars": 149,
"preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\n# learning policy\nlr_config = dict(step=[16, 19])\nrunner = dict(type='Epoch"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_1x_coco.py",
"chars": 422,
"preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n depth=101,"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_32x4d_fpn_20e_coco.py",
"chars": 423,
"preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n depth=101"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_1x_coco.py",
"chars": 446,
"preview": "_base_ = './cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n type='CascadeRCNN',\n backbone=dict(\n type='ResN"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/cascade_rcnn_x101_64x4d_fpn_20e_coco.py",
"chars": 447,
"preview": "_base_ = './cascade_rcnn_r50_fpn_20e_coco.py'\nmodel = dict(\n type='CascadeRCNN',\n backbone=dict(\n type='Res"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rcnn/metafile.yml",
"chars": 19093,
"preview": "Collections:\n - Name: Cascade R-CNN\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD wi"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_fast_rcnn_r50_caffe_fpn_1x_coco.py",
"chars": 2833,
"preview": "_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNet',\n dep"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_faster_rcnn_r50_caffe_fpn_1x_coco.py",
"chars": 3490,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py'\nrpn_weight = 0.7\nmodel = dict(\n rpn_head=dict(\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rpn/crpn_r50_caffe_fpn_1x_coco.py",
"chars": 2750,
"preview": "_base_ = '../rpn/rpn_r50_caffe_fpn_1x_coco.py'\nmodel = dict(\n rpn_head=dict(\n _delete_=True,\n type='Cas"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cascade_rpn/metafile.yml",
"chars": 1525,
"preview": "Collections:\n - Name: Cascade RPN\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with"
},
{
"path": "DLTA_AI_app/mmdetection/configs/centernet/centernet_resnet18_140e_coco.py",
"chars": 91,
"preview": "_base_ = './centernet_resnet18_dcnv2_140e_coco.py'\n\nmodel = dict(neck=dict(use_dcn=False))\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/centernet/centernet_resnet18_dcnv2_140e_coco.py",
"chars": 4250,
"preview": "_base_ = [\n '../_base_/datasets/coco_detection.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
},
{
"path": "DLTA_AI_app/mmdetection/configs/centernet/metafile.yml",
"chars": 1493,
"preview": "Collections:\n - Name: CenterNet\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with M"
},
{
"path": "DLTA_AI_app/mmdetection/configs/centripetalnet/centripetalnet_hourglass104_mstest_16x6_210e_coco.py",
"chars": 3653,
"preview": "_base_ = [\n '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/centripetalnet/metafile.yml",
"chars": 1339,
"preview": "Collections:\n - Name: CentripetalNet\n Metadata:\n Training Data: COCO\n Training Techniques:\n - Adam\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cityscapes/faster_rcnn_r50_fpn_1x_cityscapes.py",
"chars": 1648,
"preview": "_base_ = [\n '../_base_/models/faster_rcnn_r50_fpn.py',\n '../_base_/datasets/cityscapes_detection.py',\n '../_bas"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py",
"chars": 1910,
"preview": "_base_ = [\n '../_base_/models/mask_rcnn_r50_fpn.py',\n '../_base_/datasets/cityscapes_instance.py', '../_base_/defa"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/lsj_100e_coco_instance.py",
"chars": 3054,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/mstrain-poly_3x_coco_instance.py",
"chars": 2516,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/mstrain_3x_coco.py",
"chars": 2428,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/mstrain_3x_coco_instance.py",
"chars": 2466,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/ssj_270k_coco_instance.py",
"chars": 3189,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/common/ssj_scp_270k_coco_instance.py",
"chars": 3325,
"preview": "_base_ = '../_base_/default_runtime.py'\n# dataset settings\ndataset_type = 'CocoDataset'\ndata_root = 'data/coco/'\nimg_nor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-s_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py",
"chars": 1078,
"preview": "_base_ = './cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py' # noqa\n\n# please install mmcls"
},
{
"path": "DLTA_AI_app/mmdetection/configs/convnext/cascade_mask_rcnn_convnext-t_p4_w7_fpn_giou_4conv1f_fp16_ms-crop_3x_coco.py",
"chars": 5693,
"preview": "_base_ = [\n '../_base_/models/cascade_mask_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_instance.py',\n '../_base"
},
{
"path": "DLTA_AI_app/mmdetection/configs/convnext/mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco.py",
"chars": 3419,
"preview": "_base_ = [\n '../_base_/models/mask_rcnn_r50_fpn.py',\n '../_base_/datasets/coco_instance.py',\n '../_base_/schedu"
},
{
"path": "DLTA_AI_app/mmdetection/configs/convnext/metafile.yml",
"chars": 3486,
"preview": "Models:\n - Name: mask_rcnn_convnext-t_p4_w7_fpn_fp16_ms-crop_3x_coco\n In Collection: Mask R-CNN\n Config: configs/"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py",
"chars": 3592,
"preview": "_base_ = [\n '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_32x3_210e_coco.py",
"chars": 3592,
"preview": "_base_ = [\n '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cornernet/cornernet_hourglass104_mstest_8x6_210e_coco.py",
"chars": 3591,
"preview": "_base_ = [\n '../_base_/default_runtime.py', '../_base_/datasets/coco_detection.py'\n]\n\n# model settings\nmodel = dict(\n"
},
{
"path": "DLTA_AI_app/mmdetection/configs/cornernet/metafile.yml",
"chars": 2801,
"preview": "Collections:\n - Name: CornerNet\n Metadata:\n Training Data: COCO\n Training Techniques:\n - Adam\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
"chars": 222,
"preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type="
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
"chars": 221,
"preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py",
"chars": 228,
"preview": "_base_ = '../cascade_rcnn/cascade_mask_rcnn_x101_32x4d_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
"chars": 217,
"preview": "_base_ = '../cascade_rcnn/cascade_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN'"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
"chars": 216,
"preview": "_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN',"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
"chars": 215,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN', "
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
"chars": 214,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN', d"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py",
"chars": 408,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n roi_head=dict(\n bbox_roi_extractor=dic"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py",
"chars": 557,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n type='ResNeXt',\n "
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py",
"chars": 211,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN', defo"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py",
"chars": 210,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN', defor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py",
"chars": 240,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCN', defor"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcn/metafile.yml",
"chars": 9291,
"preview": "Collections:\n - Name: Deformable Convolutional Networks\n Metadata:\n Training Data: COCO\n Training Techniqu"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py",
"chars": 216,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCNv2',"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py",
"chars": 216,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCNv2',"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py",
"chars": 417,
"preview": "_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n roi_head=dict(\n bbox_roi_extractor=dic"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py",
"chars": 242,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCNv2', def"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py",
"chars": 212,
"preview": "_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'\nmodel = dict(\n backbone=dict(\n dcn=dict(type='DCNv2', def"
},
{
"path": "DLTA_AI_app/mmdetection/configs/dcnv2/metafile.yml",
"chars": 4203,
"preview": "Collections:\n - Name: Deformable Convolutional Networks v2\n Metadata:\n Training Data: COCO\n Training Techn"
},
{
"path": "DLTA_AI_app/mmdetection/configs/ddod/ddod_r50_fpn_1x_coco.py",
"chars": 2101,
"preview": "_base_ = [\n '../_base_/datasets/coco_detection.py',\n '../_base_/schedules/schedule_1x.py', '../_base_/default_runt"
},
{
"path": "DLTA_AI_app/mmdetection/configs/ddod/metafile.yml",
"chars": 951,
"preview": "Collections:\n - Name: DDOD\n Metadata:\n Training Data: COCO\n Training Techniques:\n - SGD with Moment"
},
{
"path": "DLTA_AI_app/mmdetection/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py",
"chars": 351,
"preview": "_base_ = [\n '../_base_/models/mask_rcnn_r50_fpn.py',\n '../_base_/datasets/deepfashion.py', '../_base_/schedules/sc"
},
{
"path": "DLTA_AI_app/mmdetection/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py",
"chars": 6666,
"preview": "_base_ = [\n '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'\n]\nmodel = dict(\n type='Deformab"
}
]
// ... and 1215 more files (download for full content)
About this extraction
This page contains the full source code of the 0ssamaak0/DLTA-AI GitHub repository, extracted and formatted as plain text for AI agents and large language models (LLMs). The extraction includes 1415 files (5.9 MB), approximately 1.6M tokens, and a symbol index with 4064 extracted functions, classes, methods, constants, and types. Use this with OpenClaw, Claude, ChatGPT, Cursor, Windsurf, or any other AI tool that accepts text input. You can copy the full output to your clipboard or download it as a .txt file.
Extracted by GitExtract — a free GitHub repo-to-text converter for AI. Built by Nikandr Surkov.